ia64/xen-unstable

changeset 19123:254021201b1b

merge with xen-unstable.hg
author Isaku Yamahata <yamahata@valinux.co.jp>
date Fri Jan 30 10:54:13 2009 +0900 (2009-01-30)
parents 79f259a26a11 916ca93a8658
children 7029eb0930b4
     1.1 --- a/stubdom/stubdom-dm	Wed Jan 28 13:06:45 2009 +0900
     1.2 +++ b/stubdom/stubdom-dm	Fri Jan 30 10:54:13 2009 +0900
     1.3 @@ -15,6 +15,7 @@ domname=
     1.4  vncviewer=0
     1.5  vncpid=
     1.6  extra=
     1.7 +videoram=4
     1.8  while [ "$#" -gt 0 ];
     1.9  do
    1.10      if [ "$#" -ge 2 ];
    1.11 @@ -38,6 +39,10 @@ do
    1.12                  extra="$extra -loadvm $2";
    1.13                  shift
    1.14                  ;;
    1.15 +	    -videoram)
    1.16 +	        videoram="$2"
    1.17 +		shift
    1.18 +		;;
    1.19  	esac
    1.20      fi
    1.21      case "$1" in
    1.22 @@ -72,7 +77,7 @@ do
    1.23  	sleep 1
    1.24  done
    1.25  
    1.26 -creation="xm create -c $domname-dm target=$domid memory=32 extra=\"$extra\""
    1.27 +creation="xm create -c $domname-dm target=$domid memory=32 videoram=$videoram extra=\"$extra\""
    1.28  
    1.29  (while true ; do sleep 60 ; done) | /bin/sh -c "$creation" &
    1.30  #xterm -geometry +0+0 -e /bin/sh -c "$creation ; echo ; echo press ENTER to shut down ; read" &
     2.1 --- a/tools/python/xen/xend/image.py	Wed Jan 28 13:06:45 2009 +0900
     2.2 +++ b/tools/python/xen/xend/image.py	Fri Jan 30 10:54:13 2009 +0900
     2.3 @@ -633,6 +633,8 @@ class LinuxImageHandler(ImageHandler):
     2.4  
     2.5      def configure(self, vmConfig):
     2.6          ImageHandler.configure(self, vmConfig)
     2.7 +        self.vramsize = int(vmConfig['platform'].get('videoram',4)) * 1024
     2.8 +        self.is_stubdom = (self.kernel.find('stubdom') >= 0)
     2.9  
    2.10      def buildDomain(self):
    2.11          store_evtchn = self.vm.getStorePort()
    2.12 @@ -664,6 +666,17 @@ class LinuxImageHandler(ImageHandler):
    2.13                                flags          = self.flags,
    2.14                                vhpt           = self.vhpt)
    2.15  
    2.16 +    def getRequiredAvailableMemory(self, mem_kb):
    2.17 +        if self.is_stubdom :
    2.18 +            mem_kb += self.vramsize
    2.19 +        return mem_kb
    2.20 +
    2.21 +    def getRequiredInitialReservation(self):
    2.22 +        return self.vm.getMemoryTarget()
    2.23 +
    2.24 +    def getRequiredMaximumReservation(self):
    2.25 +        return self.vm.getMemoryMaximum()
    2.26 +
    2.27      def parseDeviceModelArgs(self, vmConfig):
    2.28          ret = ImageHandler.parseDeviceModelArgs(self, vmConfig)
    2.29          # Equivalent to old xenconsoled behaviour. Should make
     3.1 --- a/tools/python/xen/xend/server/blkif.py	Wed Jan 28 13:06:45 2009 +0900
     3.2 +++ b/tools/python/xen/xend/server/blkif.py	Fri Jan 30 10:54:13 2009 +0900
     3.3 @@ -18,6 +18,7 @@
     3.4  
     3.5  import re
     3.6  import string
     3.7 +import os
     3.8  
     3.9  from xen.util import blkif
    3.10  import xen.util.xsm.xsm as security
    3.11 @@ -35,6 +36,13 @@ class BlkifController(DevController):
    3.12          """
    3.13          DevController.__init__(self, vm)
    3.14  
    3.15 +    def _isValidProtocol(self, protocol):
    3.16 +        if protocol in ('phy', 'file', 'tap'):
    3.17 +            return True
    3.18 +
    3.19 +        return os.access('/etc/xen/scripts/block-%s' % protocol, os.X_OK)
    3.20 +
    3.21 +
    3.22      def getDeviceDetails(self, config):
    3.23          """@see DevController.getDeviceDetails"""
    3.24          uname = config.get('uname', '')
    3.25 @@ -56,10 +64,8 @@ class BlkifController(DevController):
    3.26          else:
    3.27              try:
    3.28                  (typ, params) = string.split(uname, ':', 1)
    3.29 -                if typ not in ('phy', 'file', 'tap'):
    3.30 -                    raise VmError(
    3.31 -                        'Block device must have "phy", "file" or "tap" '
    3.32 -                        'specified to type')
    3.33 +                if not self._isValidProtocol(typ):
    3.34 +                    raise VmError('Block device type "%s" is invalid.' % typ)
    3.35              except ValueError:
    3.36                  raise VmError(
    3.37                      'Block device must have physical details specified')
     4.1 --- a/tools/python/xen/xm/create.py	Wed Jan 28 13:06:45 2009 +0900
     4.2 +++ b/tools/python/xen/xm/create.py	Fri Jan 30 10:54:13 2009 +0900
     4.3 @@ -635,6 +635,8 @@ def configure_image(vals):
     4.4      if vals.root:
     4.5          cmdline_root = strip('root=', vals.root)
     4.6          config_image.append(['root', cmdline_root])
     4.7 +    if vals.videoram:
     4.8 +        config_image.append(['videoram', vals.videoram])
     4.9      if vals.extra:
    4.10          config_image.append(['args', vals.extra])
    4.11  
    4.12 @@ -884,7 +886,7 @@ def configure_hvm(config_image, vals):
    4.13      """Create the config for HVM devices.
    4.14      """
    4.15      args = [ 'device_model', 'pae', 'vcpus', 'boot', 'fda', 'fdb', 'timer_mode',
    4.16 -             'localtime', 'serial', 'stdvga', 'videoram', 'isa', 'nographic', 'soundhw',
    4.17 +             'localtime', 'serial', 'stdvga', 'isa', 'nographic', 'soundhw',
    4.18               'vnc', 'vncdisplay', 'vncunused', 'vncconsole', 'vnclisten',
    4.19               'sdl', 'display', 'xauthority', 'rtc_timeoffset', 'monitor',
    4.20               'acpi', 'apic', 'usb', 'usbdevice', 'keymap', 'pci', 'hpet',
     5.1 --- a/xen/arch/ia64/xen/domain.c	Wed Jan 28 13:06:45 2009 +0900
     5.2 +++ b/xen/arch/ia64/xen/domain.c	Fri Jan 30 10:54:13 2009 +0900
     5.3 @@ -405,6 +405,16 @@ void relinquish_vcpu_resources(struct vc
     5.4  	kill_timer(&v->arch.hlt_timer);
     5.5  }
     5.6  
     5.7 +struct domain *alloc_domain_struct(void)
     5.8 +{
     5.9 +    return xmalloc(struct domain);
    5.10 +}
    5.11 +
    5.12 +void free_domain_struct(struct domain *d)
    5.13 +{
    5.14 +    xfree(d);
    5.15 +}
    5.16 +
    5.17  struct vcpu *alloc_vcpu_struct(void)
    5.18  {
    5.19  	struct page_info *page;
    5.20 @@ -509,7 +519,7 @@ int vcpu_late_initialise(struct vcpu *v)
    5.21  
    5.22  	/* Create privregs page. */
    5.23  	order = get_order_from_shift(XMAPPEDREGS_SHIFT);
    5.24 -	v->arch.privregs = alloc_xenheap_pages(order);
    5.25 +	v->arch.privregs = alloc_xenheap_pages(order, 0);
    5.26  	if (v->arch.privregs == NULL)
    5.27  		return -ENOMEM;
    5.28  	BUG_ON(v->arch.privregs == NULL);
    5.29 @@ -578,7 +588,8 @@ int arch_domain_create(struct domain *d,
    5.30  #endif
    5.31  	if (tlb_track_create(d) < 0)
    5.32  		goto fail_nomem1;
    5.33 -	d->shared_info = alloc_xenheap_pages(get_order_from_shift(XSI_SHIFT));
    5.34 +	d->shared_info = alloc_xenheap_pages(
    5.35 +		get_order_from_shift(XSI_SHIFT), 0);
    5.36  	if (d->shared_info == NULL)
    5.37  		goto fail_nomem;
    5.38  	BUG_ON(d->shared_info == NULL);
     6.1 --- a/xen/arch/ia64/xen/mm.c	Wed Jan 28 13:06:45 2009 +0900
     6.2 +++ b/xen/arch/ia64/xen/mm.c	Fri Jan 30 10:54:13 2009 +0900
     6.3 @@ -3076,7 +3076,7 @@ void *pgtable_quicklist_alloc(void)
     6.4          clear_page(p);
     6.5          return p;
     6.6      }
     6.7 -    p = alloc_xenheap_pages(0);
     6.8 +    p = alloc_xenheap_page();
     6.9      if (p) {
    6.10          clear_page(p);
    6.11          /*
     7.1 --- a/xen/arch/ia64/xen/xenmem.c	Wed Jan 28 13:06:45 2009 +0900
     7.2 +++ b/xen/arch/ia64/xen/xenmem.c	Fri Jan 30 10:54:13 2009 +0900
     7.3 @@ -64,7 +64,7 @@ paging_init (void)
     7.4  	mpt_table_size = max_page * sizeof(unsigned long);
     7.5  	mpt_order = get_order(mpt_table_size);
     7.6  	ASSERT(mpt_order <= MAX_ORDER);
     7.7 -	if ((mpt_table = alloc_xenheap_pages(mpt_order)) == NULL)
     7.8 +	if ((mpt_table = alloc_xenheap_pages(mpt_order, 0)) == NULL)
     7.9  		panic("Not enough memory to bootstrap Xen.\n");
    7.10  
    7.11  	printk("machine to physical table: 0x%lx mpt_table_size 0x%lx\n"
     8.1 --- a/xen/arch/x86/acpi/power.c	Wed Jan 28 13:06:45 2009 +0900
     8.2 +++ b/xen/arch/x86/acpi/power.c	Fri Jan 30 10:54:13 2009 +0900
     8.3 @@ -129,20 +129,15 @@ static void acpi_sleep_prepare(u32 state
     8.4  
     8.5      wakeup_vector_va = __acpi_map_table(
     8.6          acpi_sinfo.wakeup_vector, sizeof(uint64_t));
     8.7 +
     8.8 +    /* TBoot will set resume vector itself (when it is safe to do so). */
     8.9 +    if ( tboot_in_measured_env() )
    8.10 +        return;
    8.11 +
    8.12      if ( acpi_sinfo.vector_width == 32 )
    8.13 -    {
    8.14 -            *(uint32_t *)wakeup_vector_va =
    8.15 -                tboot_in_measured_env() ?
    8.16 -                (uint32_t)g_tboot_shared->s3_tb_wakeup_entry :
    8.17 -                (uint32_t)bootsym_phys(wakeup_start);
    8.18 -    }
    8.19 +        *(uint32_t *)wakeup_vector_va = bootsym_phys(wakeup_start);
    8.20      else
    8.21 -    {
    8.22 -            *(uint64_t *)wakeup_vector_va =
    8.23 -                tboot_in_measured_env() ?
    8.24 -                (uint64_t)g_tboot_shared->s3_tb_wakeup_entry :
    8.25 -                (uint64_t)bootsym_phys(wakeup_start);
    8.26 -    }
    8.27 +        *(uint64_t *)wakeup_vector_va = bootsym_phys(wakeup_start);
    8.28  }
    8.29  
    8.30  static void acpi_sleep_post(u32 state) {}
    8.31 @@ -279,37 +274,47 @@ static int acpi_get_wake_status(void)
    8.32  
    8.33  static void tboot_sleep(u8 sleep_state)
    8.34  {
    8.35 -   uint32_t shutdown_type;
    8.36 -
    8.37 -   g_tboot_shared->acpi_sinfo.pm1a_cnt =
    8.38 -                           (uint16_t)acpi_sinfo.pm1a_cnt_blk.address;
    8.39 -   g_tboot_shared->acpi_sinfo.pm1b_cnt =
    8.40 -                           (uint16_t)acpi_sinfo.pm1b_cnt_blk.address;
    8.41 -   g_tboot_shared->acpi_sinfo.pm1a_evt =
    8.42 -                           (uint16_t)acpi_sinfo.pm1a_evt_blk.address;
    8.43 -   g_tboot_shared->acpi_sinfo.pm1b_evt =
    8.44 -                           (uint16_t)acpi_sinfo.pm1b_evt_blk.address;
    8.45 -   g_tboot_shared->acpi_sinfo.pm1a_cnt_val = acpi_sinfo.pm1a_cnt_val;
    8.46 -   g_tboot_shared->acpi_sinfo.pm1b_cnt_val = acpi_sinfo.pm1b_cnt_val;
    8.47 +    uint32_t shutdown_type;
    8.48  
    8.49 -   switch ( sleep_state )
    8.50 -   {
    8.51 -       case ACPI_STATE_S3:
    8.52 -           shutdown_type = TB_SHUTDOWN_S3;
    8.53 -           g_tboot_shared->s3_k_wakeup_entry =
    8.54 -               (uint32_t)bootsym_phys(wakeup_start);
    8.55 -           break;
    8.56 -       case ACPI_STATE_S4:
    8.57 -           shutdown_type = TB_SHUTDOWN_S4;
    8.58 -           break;
    8.59 -       case ACPI_STATE_S5:
    8.60 -           shutdown_type = TB_SHUTDOWN_S5;
    8.61 -           break;
    8.62 -       default:
    8.63 -           return;
    8.64 -   }
    8.65 +#define TB_COPY_GAS(tbg, g)             \
    8.66 +    tbg.space_id = g.space_id;          \
    8.67 +    tbg.bit_width = g.bit_width;        \
    8.68 +    tbg.bit_offset = g.bit_offset;      \
    8.69 +    tbg.access_width = g.access_width;  \
    8.70 +    tbg.address = g.address;
    8.71  
    8.72 -   tboot_shutdown(shutdown_type);
     8.73 +    /* sizes are not the same (due to packing) so copy each one */
    8.74 +    TB_COPY_GAS(g_tboot_shared->acpi_sinfo.pm1a_cnt_blk,
    8.75 +                acpi_sinfo.pm1a_cnt_blk);
    8.76 +    TB_COPY_GAS(g_tboot_shared->acpi_sinfo.pm1b_cnt_blk,
    8.77 +                acpi_sinfo.pm1b_cnt_blk);
    8.78 +    TB_COPY_GAS(g_tboot_shared->acpi_sinfo.pm1a_evt_blk,
    8.79 +                acpi_sinfo.pm1a_evt_blk);
    8.80 +    TB_COPY_GAS(g_tboot_shared->acpi_sinfo.pm1b_evt_blk,
    8.81 +                acpi_sinfo.pm1b_evt_blk);
    8.82 +    g_tboot_shared->acpi_sinfo.pm1a_cnt_val = acpi_sinfo.pm1a_cnt_val;
    8.83 +    g_tboot_shared->acpi_sinfo.pm1b_cnt_val = acpi_sinfo.pm1b_cnt_val;
    8.84 +    g_tboot_shared->acpi_sinfo.wakeup_vector = acpi_sinfo.wakeup_vector;
    8.85 +    g_tboot_shared->acpi_sinfo.vector_width = acpi_sinfo.vector_width;
    8.86 +    g_tboot_shared->acpi_sinfo.kernel_s3_resume_vector =
    8.87 +                                              bootsym_phys(wakeup_start);
    8.88 +
    8.89 +    switch ( sleep_state )
    8.90 +    {
    8.91 +        case ACPI_STATE_S3:
    8.92 +            shutdown_type = TB_SHUTDOWN_S3;
    8.93 +            break;
    8.94 +        case ACPI_STATE_S4:
    8.95 +            shutdown_type = TB_SHUTDOWN_S4;
    8.96 +            break;
    8.97 +        case ACPI_STATE_S5:
    8.98 +            shutdown_type = TB_SHUTDOWN_S5;
    8.99 +            break;
   8.100 +        default:
   8.101 +            return;
   8.102 +    }
   8.103 +
   8.104 +    tboot_shutdown(shutdown_type);
   8.105  }
   8.106           
   8.107  /* System is really put into sleep state by this stub */
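
[Editor's note: the TB_COPY_GAS macro above copies the generic address structure field by field because, as the in-diff comment says, the two struct layouts differ in packing. Below is a minimal standalone C sketch of why a raw byte copy would be wrong; the struct names and values are illustrative, not Xen's.]

#include <assert.h>
#include <stdint.h>

struct gas_packed {                  /* e.g. the tboot side: packed */
    uint8_t  space_id;
    uint8_t  bit_width;
    uint8_t  bit_offset;
    uint8_t  access_width;
    uint64_t address;
} __attribute__((packed));

struct gas_natural {                 /* e.g. the ACPI side: natural alignment */
    uint8_t  space_id;
    uint8_t  bit_width;
    uint8_t  bit_offset;
    uint8_t  access_width;
    uint64_t address;
};

int main(void)
{
    /* Sizes differ (12 vs 16 bytes here), so memcpy/assignment is unsafe... */
    assert(sizeof(struct gas_packed) != sizeof(struct gas_natural));

    /* ...whereas member-wise copy, which is what TB_COPY_GAS expands to,
     * is layout-independent. */
    struct gas_natural src = { 1, 16, 0, 2, 0xfed00000ULL };
    struct gas_packed dst;
    dst.space_id     = src.space_id;
    dst.bit_width    = src.bit_width;
    dst.bit_offset   = src.bit_offset;
    dst.access_width = src.access_width;
    dst.address      = src.address;
    assert(dst.address == src.address);
    return 0;
}
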
     9.1 --- a/xen/arch/x86/bzimage.c	Wed Jan 28 13:06:45 2009 +0900
     9.2 +++ b/xen/arch/x86/bzimage.c	Fri Jan 30 10:54:13 2009 +0900
     9.3 @@ -110,7 +110,7 @@ static  __init int perform_gunzip(char *
     9.4  
     9.5      window = (unsigned char *)output;
     9.6  
     9.7 -    free_mem_ptr = (unsigned long)alloc_xenheap_pages(HEAPORDER);
     9.8 +    free_mem_ptr = (unsigned long)alloc_xenheap_pages(HEAPORDER, 0);
     9.9      free_mem_end_ptr = free_mem_ptr + (PAGE_SIZE << HEAPORDER);
    9.10  
    9.11      inbuf = (unsigned char *)image;
    9.12 @@ -198,7 +198,7 @@ int __init bzimage_headroom(char *image_
    9.13  
    9.14      err = bzimage_check(hdr, image_length);
    9.15      if (err < 1)
    9.16 -        return err;
    9.17 +        return 0;
    9.18  
    9.19      img = image_start + (hdr->setup_sects+1) * 512;
    9.20      img += hdr->payload_offset;
    10.1 --- a/xen/arch/x86/domain.c	Wed Jan 28 13:06:45 2009 +0900
    10.2 +++ b/xen/arch/x86/domain.c	Fri Jan 30 10:54:13 2009 +0900
    10.3 @@ -162,17 +162,43 @@ void dump_pageframe_info(struct domain *
    10.4      }
    10.5  }
    10.6  
    10.7 +struct domain *alloc_domain_struct(void)
    10.8 +{
    10.9 +    struct domain *d;
   10.10 +    /*
   10.11 +     * We pack the MFN of the domain structure into a 32-bit field within
   10.12 +     * the page_info structure. Hence the MEMF_bits() restriction.
   10.13 +     */
   10.14 +    d = alloc_xenheap_pages(
   10.15 +        get_order_from_bytes(sizeof(*d)), MEMF_bits(32 + PAGE_SHIFT));
   10.16 +    if ( d != NULL )
   10.17 +        memset(d, 0, sizeof(*d));
   10.18 +    return d;
   10.19 +}
   10.20 +
   10.21 +void free_domain_struct(struct domain *d)
   10.22 +{
   10.23 +    free_xenheap_pages(d, get_order_from_bytes(sizeof(*d)));
   10.24 +}
   10.25 +
   10.26  struct vcpu *alloc_vcpu_struct(void)
   10.27  {
   10.28      struct vcpu *v;
   10.29 -    if ( (v = xmalloc(struct vcpu)) != NULL )
   10.30 +    /*
   10.31 +     * This structure contains embedded PAE PDPTEs, used when an HVM guest
   10.32 +     * runs on shadow pagetables outside of 64-bit mode. In this case the CPU
   10.33 +     * may require that the shadow CR3 points below 4GB, and hence the whole
   10.34 +     * structure must satisfy this restriction. Thus we specify MEMF_bits(32).
   10.35 +     */
   10.36 +    v = alloc_xenheap_pages(get_order_from_bytes(sizeof(*v)), MEMF_bits(32));
   10.37 +    if ( v != NULL )
   10.38          memset(v, 0, sizeof(*v));
   10.39      return v;
   10.40  }
   10.41  
   10.42  void free_vcpu_struct(struct vcpu *v)
   10.43  {
   10.44 -    xfree(v);
   10.45 +    free_xenheap_pages(v, get_order_from_bytes(sizeof(*v)));
   10.46  }
   10.47  
   10.48  #ifdef CONFIG_COMPAT
   10.49 @@ -357,7 +383,7 @@ int arch_domain_create(struct domain *d,
   10.50      INIT_LIST_HEAD(&d->arch.relmem_list);
   10.51  
   10.52      pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
   10.53 -    d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
   10.54 +    d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order, 0);
   10.55      if ( d->arch.mm_perdomain_pt == NULL )
   10.56          goto fail;
   10.57      memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE << pdpt_order);
   10.58 @@ -405,17 +431,12 @@ int arch_domain_create(struct domain *d,
   10.59          if ( d->arch.ioport_caps == NULL )
   10.60              goto fail;
   10.61  
   10.62 -#ifdef __i386__
   10.63 -        if ( (d->shared_info = alloc_xenheap_page()) == NULL )
   10.64 +        /*
   10.65 +         * The shared_info machine address must fit in a 32-bit field within a
   10.66 +         * 32-bit guest's start_info structure. Hence we specify MEMF_bits(32).
   10.67 +         */
   10.68 +        if ( (d->shared_info = alloc_xenheap_pages(0, MEMF_bits(32))) == NULL )
   10.69              goto fail;
   10.70 -#else
   10.71 -        pg = alloc_domheap_page(
   10.72 -            NULL, MEMF_node(domain_to_node(d)) | MEMF_bits(32));
   10.73 -        if ( pg == NULL )
   10.74 -            goto fail;
   10.75 -        pg->count_info |= PGC_xen_heap;
   10.76 -        d->shared_info = page_to_virt(pg);
   10.77 -#endif
   10.78  
   10.79          clear_page(d->shared_info);
   10.80          share_xen_page_with_guest(
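
[Editor's note: a worked check of the MEMF_bits(32 + PAGE_SHIFT) restriction in alloc_domain_struct() above. The allocation is constrained below 2^(32 + PAGE_SHIFT) precisely so the structure's machine frame number fits the 32-bit page_info field; this standalone sketch only verifies that arithmetic, with PAGE_SHIFT hard-coded to the x86 value.]

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12                /* x86: 4 KiB pages */

int main(void)
{
    /* The highest page-aligned address the MEMF_bits(32 + PAGE_SHIFT)
     * constraint permits... */
    uint64_t addr = ((uint64_t)1 << (32 + PAGE_SHIFT)) - (1 << PAGE_SHIFT);

    /* ...still yields an MFN that fits in 32 bits. */
    uint64_t mfn = addr >> PAGE_SHIFT;
    assert(mfn <= UINT32_MAX);
    return 0;
}
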
    11.1 --- a/xen/arch/x86/e820.c	Wed Jan 28 13:06:45 2009 +0900
    11.2 +++ b/xen/arch/x86/e820.c	Fri Jan 30 10:54:13 2009 +0900
    11.3 @@ -391,8 +391,9 @@ static void __init machine_specific_memo
    11.4      reserve_dmi_region();
    11.5  }
    11.6  
    11.7 -/* Reserve RAM area (@s,@e) in the specified e820 map. */
    11.8 -int __init reserve_e820_ram(struct e820map *e820, uint64_t s, uint64_t e)
    11.9 +int __init e820_change_range_type(
   11.10 +    struct e820map *e820, uint64_t s, uint64_t e,
   11.11 +    uint32_t orig_type, uint32_t new_type)
   11.12  {
   11.13      uint64_t rs = 0, re = 0;
   11.14      int i;
   11.15 @@ -406,55 +407,79 @@ int __init reserve_e820_ram(struct e820m
   11.16              break;
   11.17      }
   11.18  
   11.19 -    if ( (i == e820->nr_map) || (e820->map[i].type != E820_RAM) )
   11.20 +    if ( (i == e820->nr_map) || (e820->map[i].type != orig_type) )
   11.21          return 0;
   11.22  
   11.23      if ( (s == rs) && (e == re) )
   11.24      {
   11.25 -        /* Complete excision. */
   11.26 -        memmove(&e820->map[i], &e820->map[i+1],
   11.27 -                (e820->nr_map-i-1) * sizeof(e820->map[0]));
   11.28 -        e820->nr_map--;
   11.29 +        e820->map[i].type = new_type;
   11.30      }
   11.31 -    else if ( s == rs )
   11.32 +    else if ( (s == rs) || (e == re) )
   11.33      {
   11.34 -        /* Truncate start. */
   11.35 -        e820->map[i].addr += e - s;
   11.36 -        e820->map[i].size -= e - s;
   11.37 -    }
   11.38 -    else if ( e == re )
   11.39 -    {
   11.40 -        /* Truncate end. */
   11.41 -        e820->map[i].size -= e - s;
   11.42 -    }
   11.43 -    else if ( e820->nr_map < ARRAY_SIZE(e820->map) )
   11.44 -    {
   11.45 -        /* Split in two. */
   11.46 +        if ( (e820->nr_map + 1) > ARRAY_SIZE(e820->map) )
   11.47 +            goto overflow;
   11.48 +
   11.49          memmove(&e820->map[i+1], &e820->map[i],
   11.50                  (e820->nr_map-i) * sizeof(e820->map[0]));
   11.51          e820->nr_map++;
   11.52 -        e820->map[i].size = s - rs;
   11.53 -        i++;
   11.54 -        e820->map[i].addr = e;
   11.55 -        e820->map[i].size = re - e;
   11.56 -    }
   11.57 -    else
   11.58 -    {
   11.59 -        /* e820map is at maximum size. We have to leak some space. */
   11.60 -        if ( (s - rs) > (re - e) )
   11.61 +
   11.62 +        if ( s == rs )
   11.63          {
   11.64 -            printk("e820 overflow: leaking RAM %"PRIx64"-%"PRIx64"\n", e, re);
   11.65 -            e820->map[i].size = s - rs;
   11.66 +            e820->map[i].size = e - s;
   11.67 +            e820->map[i].type = new_type;
   11.68 +            e820->map[i+1].addr = e;
   11.69 +            e820->map[i+1].size = re - e;
   11.70          }
   11.71          else
   11.72          {
   11.73 -            printk("e820 overflow: leaking RAM %"PRIx64"-%"PRIx64"\n", rs, s);
   11.74 -            e820->map[i].addr = e;
   11.75 -            e820->map[i].size = re - e;
   11.76 +            e820->map[i].size = s - rs;
   11.77 +            e820->map[i+1].addr = s;
   11.78 +            e820->map[i+1].size = e - s;
   11.79 +            e820->map[i+1].type = new_type;
   11.80          }
   11.81      }
   11.82 +    else if ( e820->nr_map+1 < ARRAY_SIZE(e820->map) )
   11.83 +    {
   11.84 +        if ( (e820->nr_map + 2) > ARRAY_SIZE(e820->map) )
   11.85 +            goto overflow;
   11.86 +
   11.87 +        memmove(&e820->map[i+2], &e820->map[i],
   11.88 +                (e820->nr_map-i) * sizeof(e820->map[0]));
   11.89 +        e820->nr_map += 2;
   11.90 +
   11.91 +        e820->map[i].size = s - rs;
   11.92 +        e820->map[i+1].addr = s;
   11.93 +        e820->map[i+1].size = e - s;
   11.94 +        e820->map[i+1].type = new_type;
   11.95 +        e820->map[i+2].addr = e;
   11.96 +        e820->map[i+2].size = re - e;
   11.97 +    }
   11.98 +
   11.99 +    /* Finally, look for any opportunities to merge adjacent e820 entries. */
  11.100 +    for ( i = 0; i < (e820->nr_map - 1); i++ )
  11.101 +    {
  11.102 +        if ( (e820->map[i].type != e820->map[i+1].type) ||
  11.103 +             ((e820->map[i].addr + e820->map[i].size) != e820->map[i+1].addr) )
  11.104 +            continue;
  11.105 +        e820->map[i].size += e820->map[i+1].size;
  11.106 +        memmove(&e820->map[i+1], &e820->map[i+2],
  11.107 +                (e820->nr_map-i-2) * sizeof(e820->map[0]));
  11.108 +        e820->nr_map--;
  11.109 +        i--;
  11.110 +    }
  11.111  
  11.112      return 1;
  11.113 +
  11.114 + overflow:
  11.115 +    printk("Overflow in e820 while reserving region %"PRIx64"-%"PRIx64"\n",
  11.116 +           s, e);
  11.117 +    return 0;
  11.118 +}
  11.119 +
  11.120 +/* Set E820_RAM area (@s,@e) as RESERVED in specified e820 map. */
  11.121 +int __init reserve_e820_ram(struct e820map *e820, uint64_t s, uint64_t e)
  11.122 +{
  11.123 +    return e820_change_range_type(e820, s, e, E820_RAM, E820_RESERVED);
  11.124  }
  11.125  
  11.126  unsigned long __init init_e820(
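
[Editor's note: the interesting case in e820_change_range_type() above is the interior split, where one entry becomes three. Below is a minimal standalone model of that arithmetic; the struct and function names are hypothetical, not the Xen ones.]

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t addr, size; uint32_t type; };

/* Split range r at [s,e): left keeps r.type, middle takes new_type,
 * right keeps r.type. Mirrors the map[i]/map[i+1]/map[i+2] updates. */
static int split_range(struct range out[3], struct range r,
                       uint64_t s, uint64_t e, uint32_t new_type)
{
    uint64_t rs = r.addr, re = r.addr + r.size;
    if ( s <= rs || e >= re )
        return 0;                    /* not a strict interior split */
    out[0] = (struct range){ rs, s - rs, r.type };
    out[1] = (struct range){ s,  e - s,  new_type };
    out[2] = (struct range){ e,  re - e, r.type };
    return 3;
}

int main(void)
{
    struct range out[3];
    int n = split_range(out, (struct range){ 0x100000, 0x700000, 1 },
                        0x200000, 0x300000, 2);
    for ( int i = 0; i < n; i++ )
        printf("%#llx..%#llx type %u\n",
               (unsigned long long)out[i].addr,
               (unsigned long long)(out[i].addr + out[i].size), out[i].type);
    return 0;
}
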
    12.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Wed Jan 28 13:06:45 2009 +0900
    12.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Fri Jan 30 10:54:13 2009 +0900
    12.3 @@ -138,7 +138,7 @@ static int construct_vmcb(struct vcpu *v
    12.4                              CR_INTERCEPT_CR8_WRITE);
    12.5  
    12.6      /* I/O and MSR permission bitmaps. */
    12.7 -    arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE));
    12.8 +    arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
    12.9      if ( arch_svm->msrpm == NULL )
   12.10          return -ENOMEM;
   12.11      memset(arch_svm->msrpm, 0xff, MSRPM_SIZE);
    13.1 --- a/xen/arch/x86/mm/p2m.c	Wed Jan 28 13:06:45 2009 +0900
    13.2 +++ b/xen/arch/x86/mm/p2m.c	Fri Jan 30 10:54:13 2009 +0900
    13.3 @@ -713,116 +713,121 @@ p2m_pod_dump_data(struct domain *d)
    13.4  
    13.5  #define superpage_aligned(_x)  (((_x)&((1<<9)-1))==0)
    13.6  
    13.7 -/* Must be called w/ p2m lock held, page_alloc lock not held */
    13.8 +/* Search for all-zero superpages to be reclaimed as superpages for the
    13.9 + * PoD cache. Must be called w/ p2m lock held, page_alloc lock not held. */
   13.10  static int
   13.11  p2m_pod_zero_check_superpage(struct domain *d, unsigned long gfn)
   13.12  {
   13.13 -    mfn_t mfns[1<<9];
   13.14 -    p2m_type_t types[1<<9];
   13.15 -    unsigned long * map[1<<9] = { NULL };
   13.16 -    int ret=0, reset = 0, reset_max = 0;
   13.17 +    mfn_t mfn, mfn0 = _mfn(INVALID_MFN);
   13.18 +    p2m_type_t type, type0 = 0;
   13.19 +    unsigned long * map = NULL;
   13.20 +    int ret=0, reset = 0;
   13.21      int i, j;
   13.22 +    int max_ref = 1;
   13.23  
   13.24      if ( !superpage_aligned(gfn) )
   13.25          goto out;
   13.26  
   13.27 +    /* Allow an extra refcount for one shadow pt mapping in shadowed domains */
   13.28 +    if ( paging_mode_shadow(d) )
   13.29 +        max_ref++;
   13.30 +
   13.31      /* Look up the mfns, checking to make sure they're the same mfn
   13.32       * and aligned, and mapping them. */
   13.33      for ( i=0; i<(1<<9); i++ )
   13.34      {
   13.35 -        mfns[i] = gfn_to_mfn_query(d, gfn + i, types + i);
   13.36 +        
   13.37 +        mfn = gfn_to_mfn_query(d, gfn + i, &type);
   13.38 +
   13.39 +        if ( i == 0 )
   13.40 +        {
   13.41 +            mfn0 = mfn;
   13.42 +            type0 = type;
   13.43 +        }
   13.44  
   13.45          /* Conditions that must be met for superpage-superpage:
   13.46           * + All gfns are ram types
   13.47           * + All gfns have the same type
   13.48 +         * + All of the mfns are allocated to a domain
   13.49           * + None of the mfns are used as pagetables
   13.50           * + The first mfn is 2-meg aligned
   13.51           * + All the other mfns are in sequence
   13.52 +         * Adding for good measure:
   13.53 +         * + None of the mfns are likely to be mapped elsewhere (refcount
   13.54 +         *   2 or less for shadow, 1 for hap)
   13.55           */
   13.56 -        if ( p2m_is_ram(types[i])
   13.57 -             && types[i] == types[0]
   13.58 -             && ( (mfn_to_page(mfns[i])->count_info & PGC_page_table) == 0 )
   13.59 -             && ( ( i == 0 && superpage_aligned(mfn_x(mfns[0])) )
   13.60 -                  || ( i != 0 && mfn_x(mfns[i]) == mfn_x(mfns[0]) + i ) ) )
   13.61 -            map[i] = map_domain_page(mfn_x(mfns[i]));
   13.62 -        else
   13.63 -            goto out_unmap;
   13.64 +        if ( !p2m_is_ram(type)
   13.65 +             || type != type0
   13.66 +             || ( (mfn_to_page(mfn)->count_info & PGC_allocated) == 0 )
   13.67 +             || ( (mfn_to_page(mfn)->count_info & PGC_page_table) != 0 )
   13.68 +             || ( (mfn_to_page(mfn)->count_info & PGC_count_mask) > max_ref )
   13.69 +             || !( ( i == 0 && superpage_aligned(mfn_x(mfn0)) )
   13.70 +                   || ( i != 0 && mfn_x(mfn) == (mfn_x(mfn0) + i) ) ) )
   13.71 +            goto out;
   13.72      }
   13.73  
   13.74      /* Now, do a quick check to see if it may be zero before unmapping. */
   13.75      for ( i=0; i<(1<<9); i++ )
   13.76      {
   13.77          /* Quick zero-check */
   13.78 +        map = map_domain_page(mfn_x(mfn0) + i);
   13.79 +
   13.80          for ( j=0; j<16; j++ )
   13.81 -            if( *(map[i]+j) != 0 )
   13.82 +            if( *(map+j) != 0 )
   13.83                  break;
   13.84  
   13.85 +        unmap_domain_page(map);
   13.86 +
   13.87          if ( j < 16 )
   13.88 -            goto out_unmap;
   13.89 +            goto out;
   13.90  
   13.91      }
   13.92  
   13.93      /* Try to remove the page, restoring old mapping if it fails. */
   13.94 -    reset_max = 1<<9;
   13.95      set_p2m_entry(d, gfn,
   13.96                    _mfn(POPULATE_ON_DEMAND_MFN), 9,
   13.97                    p2m_populate_on_demand);
   13.98  
   13.99 -    if ( (mfn_to_page(mfns[0])->u.inuse.type_info & PGT_count_mask) != 0 )
   13.100 +    /* Make sure none of the MFNs are used elsewhere... for example, mapped
  13.101 +     * via the grant table interface, or by qemu.  Allow one refcount for
  13.102 +     * being allocated to the domain. */
  13.103 +    for ( i=0; i < (1<<9); i++ )
  13.104      {
  13.105 -        reset = 1;
  13.106 -        goto out_reset;
  13.107 -    }
  13.108 -
  13.109 -    /* Timing here is important.  We need to make sure not to reclaim
  13.110 -     * a page which has been grant-mapped to another domain.  But we
  13.111 -     * can't grab the grant table lock, because we may be invoked from
  13.112 -     * the grant table code!  So we first remove the page from the
  13.113 -     * p2m, then check to see if the gpfn has been granted.  Once this
  13.114 -     * gpfn is marked PoD, any future gfn_to_mfn() call will block
  13.115 -     * waiting for the p2m lock.  If we find that it has been granted, we
  13.116 -     * simply restore the old value.
  13.117 -     */
  13.118 -    if ( gnttab_is_granted(d, gfn, 9) )
  13.119 -    {
  13.120 -        printk("gfn contains grant table %lx\n", gfn);
  13.121 -        reset = 1;
  13.122 -        goto out_reset;
  13.123 +        mfn = _mfn(mfn_x(mfn0) + i);
  13.124 +        if ( (mfn_to_page(mfn)->count_info & PGC_count_mask) > 1 )
  13.125 +        {
  13.126 +            reset = 1;
  13.127 +            goto out_reset;
  13.128 +        }
  13.129      }
  13.130  
  13.131      /* Finally, do a full zero-check */
  13.132      for ( i=0; i < (1<<9); i++ )
  13.133      {
  13.134 -        for ( j=0; j<PAGE_SIZE/sizeof(*map[i]); j++ )
  13.135 -            if( *(map[i]+j) != 0 )
  13.136 +        map = map_domain_page(mfn_x(mfn0) + i);
  13.137 +
  13.138 +        for ( j=0; j<PAGE_SIZE/sizeof(*map); j++ )
  13.139 +            if( *(map+j) != 0 )
  13.140              {
  13.141                  reset = 1;
  13.142                  break;
  13.143              }
  13.144  
  13.145 +        unmap_domain_page(map);
  13.146 +
  13.147          if ( reset )
  13.148              goto out_reset;
  13.149      }
  13.150  
  13.151      /* Finally!  We've passed all the checks, and can add the mfn superpage
  13.152       * back on the PoD cache, and account for the new p2m PoD entries */
  13.153 -    p2m_pod_cache_add(d, mfn_to_page(mfns[0]), 9);
  13.154 +    p2m_pod_cache_add(d, mfn_to_page(mfn0), 9);
  13.155      d->arch.p2m->pod.entry_count += (1<<9);
  13.156  
  13.157  out_reset:
  13.158      if ( reset )
  13.159 -    {
  13.160 -        if (reset_max == (1<<9) )
  13.161 -            set_p2m_entry(d, gfn, mfns[0], 9, types[0]);
  13.162 -        else
  13.163 -            for ( i=0; i<reset_max; i++)
  13.164 -                set_p2m_entry(d, gfn + i, mfns[i], 0, types[i]);
  13.165 -    }
  13.166 +        set_p2m_entry(d, gfn, mfn0, 9, type0);
  13.167      
  13.168 -out_unmap:
  13.169 -    for ( i=0; i<(1<<9); i++ )
  13.170 -        if ( map[i] )
  13.171 -            unmap_domain_page(map[i]);
  13.172  out:
  13.173      return ret;
  13.174  }
  13.175 @@ -835,15 +840,22 @@ p2m_pod_zero_check(struct domain *d, uns
  13.176      unsigned long * map[count];
  13.177  
  13.178      int i, j;
  13.179 +    int max_ref = 1;
  13.180 +
  13.181 +    /* Allow an extra refcount for one shadow pt mapping in shadowed domains */
  13.182 +    if ( paging_mode_shadow(d) )
  13.183 +        max_ref++;
  13.184  
  13.185      /* First, get the gfn list, translate to mfns, and map the pages. */
  13.186      for ( i=0; i<count; i++ )
  13.187      {
  13.188          mfns[i] = gfn_to_mfn_query(d, gfns[i], types + i);
  13.189 -        /* If this is ram, and not a pagetable, map it; otherwise,
  13.190 -         * skip. */
  13.191 +        /* If this is ram, and not a pagetable, and probably not mapped
  13.192 +           elsewhere, map it; otherwise, skip. */
  13.193          if ( p2m_is_ram(types[i])
  13.194 -             && ( (mfn_to_page(mfns[i])->count_info & PGC_page_table) == 0 ) )
  13.195 +             && ( (mfn_to_page(mfns[i])->count_info & PGC_allocated) != 0 ) 
  13.196 +             && ( (mfn_to_page(mfns[i])->count_info & PGC_page_table) == 0 ) 
  13.197 +             && ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) <= max_ref ) )
  13.198              map[i] = map_domain_page(mfn_x(mfns[i]));
  13.199          else
  13.200              map[i] = NULL;
  13.201 @@ -873,7 +885,9 @@ p2m_pod_zero_check(struct domain *d, uns
  13.202                        _mfn(POPULATE_ON_DEMAND_MFN), 0,
  13.203                        p2m_populate_on_demand);
  13.204  
  13.205 -        if ( (mfn_to_page(mfns[i])->u.inuse.type_info & PGT_count_mask) != 0 )
  13.206 +        /* See if the page was successfully unmapped.  (Allow one refcount
  13.207 +         * for being allocated to a domain.) */
  13.208 +        if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
  13.209          {
  13.210              unmap_domain_page(map[i]);
  13.211              map[i] = NULL;
  13.212 @@ -896,8 +910,7 @@ p2m_pod_zero_check(struct domain *d, uns
  13.213  
  13.214          /* See comment in p2m_pod_zero_check_superpage() re gnttab
  13.215           * check timing.  */
  13.216 -        if ( j < PAGE_SIZE/sizeof(*map[i])
  13.217 -             || gnttab_is_granted(d, gfns[i], 0) )
  13.218 +        if ( j < PAGE_SIZE/sizeof(*map[i]) )
  13.219          {
  13.220              set_p2m_entry(d, gfns[i], mfns[i], 0, types[i]);
  13.221              continue;
    14.1 --- a/xen/arch/x86/setup.c	Wed Jan 28 13:06:45 2009 +0900
    14.2 +++ b/xen/arch/x86/setup.c	Fri Jan 30 10:54:13 2009 +0900
    14.3 @@ -417,7 +417,7 @@ void __init __start_xen(unsigned long mb
    14.4      unsigned int initrdidx = 1;
    14.5      multiboot_info_t *mbi = __va(mbi_p);
    14.6      module_t *mod = (module_t *)__va(mbi->mods_addr);
    14.7 -    unsigned long nr_pages, modules_length, modules_headroom;
    14.8 +    unsigned long nr_pages, modules_length, modules_headroom = -1;
    14.9      unsigned long allocator_bitmap_end;
   14.10      int i, e820_warn = 0, bytes = 0;
   14.11      struct ns16550_defaults ns16550 = {
   14.12 @@ -617,9 +617,6 @@ void __init __start_xen(unsigned long mb
   14.13       * x86/64, we relocate Xen to higher memory.
   14.14       */
   14.15      modules_length = mod[mbi->mods_count-1].mod_end - mod[0].mod_start;
   14.16 -    modules_headroom = bzimage_headroom(
   14.17 -        (char *)(unsigned long)mod[0].mod_start,
   14.18 -        (unsigned long)(mod[0].mod_end - mod[0].mod_start));
   14.19  
   14.20      for ( i = boot_e820.nr_map-1; i >= 0; i-- )
   14.21      {
   14.22 @@ -724,6 +721,11 @@ void __init __start_xen(unsigned long mb
   14.23          }
   14.24  #endif
   14.25  
   14.26 +        if ( modules_headroom == -1 )
   14.27 +            modules_headroom = bzimage_headroom(
   14.28 +                      (char *)(unsigned long)mod[0].mod_start,
   14.29 +                      (unsigned long)(mod[0].mod_end - mod[0].mod_start));
   14.30 +
   14.31          /* Is the region suitable for relocating the multiboot modules? */
   14.32          if ( !initial_images_start && (s < e) &&
   14.33               ((e-s) >= (modules_length+modules_headroom)) )
   14.34 @@ -1033,6 +1035,9 @@ void __init __start_xen(unsigned long mb
   14.35      if ( xen_cpuidle )
   14.36          xen_processor_pmbits |= XEN_PROCESSOR_PM_CX;
   14.37  
   14.38 +    if ( !tboot_protect_mem_regions() )
   14.39 +        panic("Could not protect TXT memory regions\n");
   14.40 +
   14.41      /*
   14.42       * We're going to setup domain0 using the module(s) that we stashed safely
   14.43       * above our heap. The second module, if present, is an initrd ramdisk.
    15.1 --- a/xen/arch/x86/smpboot.c	Wed Jan 28 13:06:45 2009 +0900
    15.2 +++ b/xen/arch/x86/smpboot.c	Fri Jan 30 10:54:13 2009 +0900
    15.3 @@ -804,7 +804,7 @@ static inline int alloc_cpu_id(void)
    15.4  static void *prepare_idle_stack(unsigned int cpu)
    15.5  {
    15.6  	if (!stack_base[cpu])
    15.7 -		stack_base[cpu] = alloc_xenheap_pages(STACK_ORDER);
    15.8 +		stack_base[cpu] = alloc_xenheap_pages(STACK_ORDER, 0);
    15.9  
   15.10  	return stack_base[cpu];
   15.11  }
   15.12 @@ -867,7 +867,7 @@ static int __devinit do_boot_cpu(int api
   15.13  					   MEMF_node(cpu_to_node(cpu)));
   15.14  		per_cpu(gdt_table, cpu) = gdt = page_to_virt(page);
   15.15  #else
   15.16 -		per_cpu(gdt_table, cpu) = gdt = alloc_xenheap_pages(order);
   15.17 +		per_cpu(gdt_table, cpu) = gdt = alloc_xenheap_pages(order, 0);
   15.18  #endif
   15.19  		memcpy(gdt, boot_cpu_gdt_table,
   15.20  		       NR_RESERVED_GDT_PAGES * PAGE_SIZE);
    16.1 --- a/xen/arch/x86/tboot.c	Wed Jan 28 13:06:45 2009 +0900
    16.2 +++ b/xen/arch/x86/tboot.c	Fri Jan 30 10:54:13 2009 +0900
    16.3 @@ -6,6 +6,7 @@
    16.4  #include <asm/fixmap.h>
    16.5  #include <asm/page.h>
    16.6  #include <asm/processor.h>
    16.7 +#include <asm/e820.h>
    16.8  #include <asm/tboot.h>
    16.9  
   16.10  /* tboot=<physical address of shared page> */
   16.11 @@ -17,10 +18,56 @@ tboot_shared_t *g_tboot_shared;
   16.12  
   16.13  static const uuid_t tboot_shared_uuid = TBOOT_SHARED_UUID;
   16.14  
   16.15 +/* used by tboot_protect_mem_regions() and/or tboot_parse_dmar_table() */
   16.16 +static uint64_t txt_heap_base, txt_heap_size;
   16.17 +static uint64_t sinit_base, sinit_size;
   16.18 +
   16.19 +/*
   16.20 + * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE)
   16.21 + */
   16.22 +
   16.23 +#define TXT_PUB_CONFIG_REGS_BASE       0xfed30000
   16.24 +#define TXT_PRIV_CONFIG_REGS_BASE      0xfed20000
   16.25 +
   16.26 +/* # pages for each config regs space - used by fixmap */
   16.27 +#define NR_TXT_CONFIG_PAGES     ((TXT_PUB_CONFIG_REGS_BASE -                \
   16.28 +                                  TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT)
   16.29 +
   16.30 +/* offsets from pub/priv config space */
   16.31 +#define TXTCR_SINIT_BASE            0x0270
   16.32 +#define TXTCR_SINIT_SIZE            0x0278
   16.33 +#define TXTCR_HEAP_BASE             0x0300
   16.34 +#define TXTCR_HEAP_SIZE             0x0308
   16.35 +
   16.36 +extern char __init_begin[], __per_cpu_start[], __per_cpu_end[], __bss_start[];
   16.37 +
   16.38 +#define SHA1_SIZE      20
   16.39 +typedef uint8_t   sha1_hash_t[SHA1_SIZE];
   16.40 +
   16.41 +typedef struct __packed {
   16.42 +    uint32_t     version;             /* currently 6 */
   16.43 +    sha1_hash_t  bios_acm_id;
   16.44 +    uint32_t     edx_senter_flags;
   16.45 +    uint64_t     mseg_valid;
   16.46 +    sha1_hash_t  sinit_hash;
   16.47 +    sha1_hash_t  mle_hash;
   16.48 +    sha1_hash_t  stm_hash;
   16.49 +    sha1_hash_t  lcp_policy_hash;
   16.50 +    uint32_t     lcp_policy_control;
   16.51 +    uint32_t     rlp_wakeup_addr;
   16.52 +    uint32_t     reserved;
   16.53 +    uint32_t     num_mdrs;
   16.54 +    uint32_t     mdrs_off;
   16.55 +    uint32_t     num_vtd_dmars;
   16.56 +    uint32_t     vtd_dmars_off;
   16.57 +} sinit_mle_data_t;
   16.58 +
   16.59  void __init tboot_probe(void)
   16.60  {
   16.61      tboot_shared_t *tboot_shared;
   16.62      unsigned long p_tboot_shared;
   16.63 +    uint32_t map_base, map_size;
   16.64 +    unsigned long map_addr;
   16.65  
   16.66      /* Look for valid page-aligned address for shared page. */
   16.67      p_tboot_shared = simple_strtoul(opt_tboot, NULL, 0);
   16.68 @@ -30,24 +77,48 @@ void __init tboot_probe(void)
   16.69      /* Map and check for tboot UUID. */
   16.70      set_fixmap(FIX_TBOOT_SHARED_BASE, p_tboot_shared);
   16.71      tboot_shared = (tboot_shared_t *)fix_to_virt(FIX_TBOOT_SHARED_BASE);
   16.72 +    if ( tboot_shared == NULL )
   16.73 +        return;
   16.74      if ( memcmp(&tboot_shared_uuid, (uuid_t *)tboot_shared, sizeof(uuid_t)) )
   16.75          return;
   16.76  
   16.77 +    /* new tboot_shared (w/ GAS support) is not backwards compatible */
   16.78 +    if ( tboot_shared->version < 3 ) {
   16.79 +        printk("unsupported version of tboot (%u)\n", tboot_shared->version);
   16.80 +        return;
   16.81 +    }
   16.82 +
   16.83      g_tboot_shared = tboot_shared;
   16.84      printk("TBOOT: found shared page at phys addr %lx:\n", p_tboot_shared);
   16.85      printk("  version: %d\n", tboot_shared->version);
   16.86      printk("  log_addr: 0x%08x\n", tboot_shared->log_addr);
   16.87 -    printk("  shutdown_entry32: 0x%08x\n", tboot_shared->shutdown_entry32);
   16.88 -    printk("  shutdown_entry64: 0x%08x\n", tboot_shared->shutdown_entry64);
   16.89 -    printk("  shutdown_type: %d\n", tboot_shared->shutdown_type);
   16.90 -    printk("  s3_tb_wakeup_entry: 0x%08x\n", tboot_shared->s3_tb_wakeup_entry);
   16.91 -    printk("  s3_k_wakeup_entry: 0x%08x\n", tboot_shared->s3_k_wakeup_entry);
   16.92 -    printk("  &acpi_sinfo: 0x%p\n", &tboot_shared->acpi_sinfo);
   16.93 -    if ( tboot_shared->version >= 0x02 )
   16.94 -    {
   16.95 -        printk("  tboot_base: 0x%08x\n", tboot_shared->tboot_base);
   16.96 -        printk("  tboot_size: 0x%x\n", tboot_shared->tboot_size);
   16.97 -    }
   16.98 +    printk("  shutdown_entry: 0x%08x\n", tboot_shared->shutdown_entry);
   16.99 +    printk("  tboot_base: 0x%08x\n", tboot_shared->tboot_base);
  16.100 +    printk("  tboot_size: 0x%x\n", tboot_shared->tboot_size);
  16.101 +
  16.102 +    /* these will be needed by tboot_protect_mem_regions() and/or
  16.103 +       tboot_parse_dmar_table(), so get them now */
  16.104 +
  16.105 +    map_base = PFN_DOWN(TXT_PUB_CONFIG_REGS_BASE);
  16.106 +    map_size = PFN_UP(NR_TXT_CONFIG_PAGES * PAGE_SIZE);
  16.107 +    map_addr = (unsigned long)__va(map_base << PAGE_SHIFT);
  16.108 +    if ( map_pages_to_xen(map_addr, map_base, map_size, __PAGE_HYPERVISOR) )
  16.109 +        return;
  16.110 +
  16.111 +    /* TXT Heap */
  16.112 +    txt_heap_base =
  16.113 +        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_BASE);
  16.114 +    txt_heap_size =
  16.115 +        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_SIZE);
  16.116 +
  16.117 +    /* SINIT */
  16.118 +    sinit_base =
  16.119 +        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_BASE);
  16.120 +    sinit_size =
  16.121 +        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_SIZE);
  16.122 +
  16.123 +    destroy_xen_mappings((unsigned long)__va(map_base << PAGE_SHIFT),
  16.124 +                         (unsigned long)__va((map_base + map_size) << PAGE_SHIFT));
  16.125  }
  16.126  
  16.127  void tboot_shutdown(uint32_t shutdown_type)
  16.128 @@ -59,17 +130,28 @@ void tboot_shutdown(uint32_t shutdown_ty
  16.129  
  16.130      local_irq_disable();
  16.131  
  16.132 +    /* if this is S3 then set regions to MAC */
  16.133 +    if ( shutdown_type == TB_SHUTDOWN_S3 ) {
  16.134 +        g_tboot_shared->num_mac_regions = 4;
  16.135 +        /* S3 resume code (and other real mode trampoline code) */
  16.136 +        g_tboot_shared->mac_regions[0].start =
  16.137 +            (uint64_t)bootsym_phys(trampoline_start);
  16.138 +        g_tboot_shared->mac_regions[0].end =
  16.139 +            (uint64_t)bootsym_phys(trampoline_end);
  16.140 +        /* hypervisor code + data */
  16.141 +        g_tboot_shared->mac_regions[1].start = (uint64_t)__pa(&_stext);
  16.142 +        g_tboot_shared->mac_regions[1].end = (uint64_t)__pa(&__init_begin);
  16.143 +        /* per-cpu data */
  16.144 +        g_tboot_shared->mac_regions[2].start = (uint64_t)__pa(&__per_cpu_start);
  16.145 +        g_tboot_shared->mac_regions[2].end = (uint64_t)__pa(&__per_cpu_end);
  16.146 +        /* bss */
  16.147 +        g_tboot_shared->mac_regions[3].start = (uint64_t)__pa(&__bss_start);
  16.148 +        g_tboot_shared->mac_regions[3].end = (uint64_t)__pa(&_end);
  16.149 +    }
  16.150 +
  16.151      /* Create identity map for tboot shutdown code. */
  16.152 -    if ( g_tboot_shared->version >= 0x02 )
  16.153 -    {
  16.154 -        map_base = PFN_DOWN(g_tboot_shared->tboot_base);
  16.155 -        map_size = PFN_UP(g_tboot_shared->tboot_size);
  16.156 -    }
  16.157 -    else
  16.158 -    {
  16.159 -        map_base = 0;
  16.160 -        map_size = PFN_UP(0xa0000);
  16.161 -    }
  16.162 +    map_base = PFN_DOWN(g_tboot_shared->tboot_base);
  16.163 +    map_size = PFN_UP(g_tboot_shared->tboot_size);
  16.164  
  16.165      err = map_pages_to_xen(map_base << PAGE_SHIFT, map_base, map_size,
  16.166                             __PAGE_HYPERVISOR);
  16.167 @@ -82,11 +164,7 @@ void tboot_shutdown(uint32_t shutdown_ty
  16.168  
  16.169      write_ptbase(idle_vcpu[0]);
  16.170  
  16.171 -#ifdef __x86_64__
  16.172 -    asm volatile ( "call *%%rdi" :: "D" (g_tboot_shared->shutdown_entry64) );
  16.173 -#else
  16.174 -    asm volatile ( "call *%0" :: "r" (g_tboot_shared->shutdown_entry32) );
  16.175 -#endif
  16.176 +    ((void(*)(void))(unsigned long)g_tboot_shared->shutdown_entry)();
  16.177  
  16.178      BUG(); /* should not reach here */
  16.179  }
  16.180 @@ -96,6 +174,96 @@ int tboot_in_measured_env(void)
  16.181      return (g_tboot_shared != NULL);
  16.182  }
  16.183  
  16.184 +int __init tboot_protect_mem_regions(void)
  16.185 +{
  16.186 +    int rc;
  16.187 +
  16.188 +    if ( !tboot_in_measured_env() )
  16.189 +        return 1;
  16.190 +
  16.191 +    /* TXT Heap */
  16.192 +    if ( txt_heap_base == 0 )
  16.193 +        return 0;
  16.194 +    rc = e820_change_range_type(
  16.195 +        &e820, txt_heap_base, txt_heap_base + txt_heap_size,
  16.196 +        E820_RESERVED, E820_UNUSABLE);
  16.197 +    if ( !rc )
  16.198 +        return 0;
  16.199 +
  16.200 +    /* SINIT */
  16.201 +    if ( sinit_base == 0 )
  16.202 +        return 0;
  16.203 +    rc = e820_change_range_type(
  16.204 +        &e820, sinit_base, sinit_base + sinit_size,
  16.205 +        E820_RESERVED, E820_UNUSABLE);
  16.206 +    if ( !rc )
  16.207 +        return 0;
  16.208 +
  16.209 +    /* TXT Private Space */
  16.210 +    rc = e820_change_range_type(
  16.211 +        &e820, TXT_PRIV_CONFIG_REGS_BASE,
  16.212 +        TXT_PRIV_CONFIG_REGS_BASE + NR_TXT_CONFIG_PAGES * PAGE_SIZE,
  16.213 +        E820_RESERVED, E820_UNUSABLE);
  16.214 +    if ( !rc )
  16.215 +        return 0;
  16.216 +
  16.217 +    return 1;
  16.218 +}
  16.219 +
  16.220 +int __init tboot_parse_dmar_table(acpi_table_handler dmar_handler)
  16.221 +{
  16.222 +    uint32_t map_base, map_size;
  16.223 +    unsigned long map_vaddr;
  16.224 +    void *heap_ptr;
  16.225 +    struct acpi_table_header *dmar_table;
  16.226 +    int rc;
  16.227 +
  16.228 +    if ( !tboot_in_measured_env() )
  16.229 +        return acpi_table_parse(ACPI_SIG_DMAR, dmar_handler);
  16.230 +
  16.231 +    /* ACPI tables may not be DMA protected by tboot, so use DMAR copy */
  16.232 +    /* SINIT saved in SinitMleData in TXT heap (which is DMA protected) */
  16.233 +
  16.234 +    if ( txt_heap_base == 0 )
  16.235 +        return 1;
  16.236 +
  16.237 +    /* map TXT heap into Xen addr space */
  16.238 +    map_base = PFN_DOWN(txt_heap_base);
  16.239 +    map_size = PFN_UP(txt_heap_size);
  16.240 +    map_vaddr = (unsigned long)__va(map_base << PAGE_SHIFT);
  16.241 +    if ( map_pages_to_xen(map_vaddr, map_base, map_size, __PAGE_HYPERVISOR) )
  16.242 +        return 1;
  16.243 +
  16.244 +    /* walk heap to SinitMleData */
  16.245 +    heap_ptr = __va(txt_heap_base);
  16.246 +    /* skip BiosData */
  16.247 +    heap_ptr += *(uint64_t *)heap_ptr;
  16.248 +    /* skip OsMleData */
  16.249 +    heap_ptr += *(uint64_t *)heap_ptr;
  16.250 +    /* skip OsSinitData */
  16.251 +    heap_ptr += *(uint64_t *)heap_ptr;
  16.252 +    /* now points to SinitMleDataSize; set to SinitMleData */
  16.253 +    heap_ptr += sizeof(uint64_t);
  16.254 +    /* get addr of DMAR table */
  16.255 +    dmar_table = (struct acpi_table_header *)(heap_ptr +
  16.256 +            ((sinit_mle_data_t *)heap_ptr)->vtd_dmars_off - sizeof(uint64_t));
  16.257 +
  16.258 +    rc = dmar_handler(dmar_table);
  16.259 +
  16.260 +    destroy_xen_mappings(
  16.261 +        (unsigned long)__va(map_base << PAGE_SHIFT),
  16.262 +        (unsigned long)__va((map_base + map_size) << PAGE_SHIFT));
  16.263 +  
   16.264 +    /* acpi_parse_dmar() zaps ACPI DMAR signature in TXT heap table */
  16.265 +    /* but dom0 will read real table, so must zap it there too */
  16.266 +    dmar_table = NULL;
  16.267 +    acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_table);
  16.268 +    if ( dmar_table != NULL )
  16.269 +        ((struct acpi_table_dmar *)dmar_table)->header.signature[0] = '\0';
  16.270 +
  16.271 +    return rc;
  16.272 +}
  16.273 +
  16.274  /*
  16.275   * Local variables:
  16.276   * mode: C
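
[Editor's note: tboot_parse_dmar_table() above walks the TXT heap by repeatedly adding a leading 64-bit size field to the current pointer. A hypothetical sketch of that walk in isolation; the region names come from the in-diff comments.]

#include <stdint.h>

/* Each TXT heap region begins with a uint64_t size that includes the
 * size field itself, so adding it advances to the next region. */
static void *skip_txt_region(void *p)
{
    return (uint8_t *)p + *(uint64_t *)p;
}

/* Usage sketch (assumes the heap is already mapped):
 *   void *p = heap_base;
 *   p = skip_txt_region(p);              -- past BiosData
 *   p = skip_txt_region(p);              -- past OsMleData
 *   p = skip_txt_region(p);              -- past OsSinitData
 *   p = (uint8_t *)p + sizeof(uint64_t); -- now at SinitMleData
 */
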
    17.1 --- a/xen/common/domain.c	Wed Jan 28 13:06:45 2009 +0900
    17.2 +++ b/xen/common/domain.c	Fri Jan 30 10:54:13 2009 +0900
    17.3 @@ -102,16 +102,6 @@ int current_domain_id(void)
    17.4      return current->domain->domain_id;
    17.5  }
    17.6  
    17.7 -static struct domain *alloc_domain_struct(void)
    17.8 -{
    17.9 -    return xmalloc(struct domain);
   17.10 -}
   17.11 -
   17.12 -static void free_domain_struct(struct domain *d)
   17.13 -{
   17.14 -    xfree(d);
   17.15 -}
   17.16 -
   17.17  static void __domain_finalise_shutdown(struct domain *d)
   17.18  {
   17.19      struct vcpu *v;
    18.1 --- a/xen/common/grant_table.c	Wed Jan 28 13:06:45 2009 +0900
    18.2 +++ b/xen/common/grant_table.c	Fri Jan 30 10:54:13 2009 +0900
    18.3 @@ -111,33 +111,6 @@ static unsigned inline int max_nr_maptra
    18.4  #define active_entry(t, e) \
    18.5      ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
    18.6  
    18.7 -/* The p2m emergency sweep code should not reclaim a frame that is currenlty
    18.8 - * grant mapped by another domain.  That would involve checking all other
    18.9 - * domains grant maps, which is impractical.  Instead, we check the active
   18.10 - * grant table for this domain to see if it's been granted.  Since this
   18.11 - * may be called as a result of a grant table op, we can't grab the lock. */
   18.12 -int
   18.13 -gnttab_is_granted(struct domain *d, xen_pfn_t gfn, int order)
   18.14 -{
   18.15 -    int i, found=0;
   18.16 -    struct active_grant_entry *act;
   18.17 -
   18.18 -    /* We need to compare with active grant entries to make sure that
   18.19 -     * pinned (== currently mapped) entries don't disappear under our
   18.20 -     * feet. */
   18.21 -    for ( i=0; i<nr_grant_entries(d->grant_table); i++ )
   18.22 -    {
   18.23 -        act = &active_entry(d->grant_table, i);
   18.24 -        if ( act->gfn >> order == gfn >> order )
   18.25 -        {
   18.26 -            found = 1;
   18.27 -            break;
   18.28 -        }
   18.29 -    }
   18.30 -
   18.31 -    return found;
   18.32 -}
   18.33 -
   18.34  static inline int
   18.35  __get_maptrack_handle(
   18.36      struct grant_table *t)
    19.1 --- a/xen/common/page_alloc.c	Wed Jan 28 13:06:45 2009 +0900
    19.2 +++ b/xen/common/page_alloc.c	Fri Jan 30 10:54:13 2009 +0900
    19.3 @@ -655,7 +655,7 @@ void init_xenheap_pages(paddr_t ps, padd
    19.4  }
    19.5  
    19.6  
    19.7 -void *alloc_xenheap_pages(unsigned int order)
    19.8 +void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
    19.9  {
   19.10      struct page_info *pg;
   19.11  
   19.12 @@ -664,15 +664,11 @@ void *alloc_xenheap_pages(unsigned int o
   19.13      pg = alloc_heap_pages(
   19.14          MEMZONE_XEN, MEMZONE_XEN, cpu_to_node(smp_processor_id()), order);
   19.15      if ( unlikely(pg == NULL) )
   19.16 -        goto no_memory;
   19.17 +        return NULL;
   19.18  
   19.19      memguard_unguard_range(page_to_virt(pg), 1 << (order + PAGE_SHIFT));
   19.20  
   19.21      return page_to_virt(pg);
   19.22 -
   19.23 - no_memory:
   19.24 -    printk("Cannot handle page request order %d!\n", order);
   19.25 -    return NULL;
   19.26  }
   19.27  
   19.28  
   19.29 @@ -695,26 +691,21 @@ void init_xenheap_pages(paddr_t ps, padd
   19.30      init_domheap_pages(ps, pe);
   19.31  }
   19.32  
   19.33 -void *alloc_xenheap_pages(unsigned int order)
   19.34 +void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
   19.35  {
   19.36      struct page_info *pg;
   19.37      unsigned int i;
   19.38  
   19.39      ASSERT(!in_irq());
   19.40  
   19.41 -    pg = alloc_heap_pages(
   19.42 -        MEMZONE_XEN+1, NR_ZONES-1, cpu_to_node(smp_processor_id()), order);
   19.43 +    pg = alloc_domheap_pages(NULL, order, memflags);
   19.44      if ( unlikely(pg == NULL) )
   19.45 -        goto no_memory;
   19.46 +        return NULL;
   19.47  
   19.48      for ( i = 0; i < (1u << order); i++ )
   19.49          pg[i].count_info |= PGC_xen_heap;
   19.50  
   19.51      return page_to_virt(pg);
   19.52 -
   19.53 - no_memory:
   19.54 -    printk("Cannot handle page request order %d!\n", order);
   19.55 -    return NULL;
   19.56  }
   19.57  
   19.58  void free_xenheap_pages(void *v, unsigned int order)
    20.1 --- a/xen/common/trace.c	Wed Jan 28 13:06:45 2009 +0900
    20.2 +++ b/xen/common/trace.c	Fri Jan 30 10:54:13 2009 +0900
    20.3 @@ -94,7 +94,7 @@ static int alloc_trace_bufs(void)
    20.4      order    = get_order_from_pages(nr_pages);
    20.5      data_size  = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf));
    20.6      
    20.7 -    if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
    20.8 +    if ( (rawbuf = alloc_xenheap_pages(order, 0)) == NULL )
    20.9      {
   20.10          printk("Xen trace buffers: memory allocation failed\n");
   20.11          opt_tbuf_size = 0;
    21.1 --- a/xen/common/xenoprof.c	Wed Jan 28 13:06:45 2009 +0900
    21.2 +++ b/xen/common/xenoprof.c	Fri Jan 30 10:54:13 2009 +0900
    21.3 @@ -225,7 +225,7 @@ static int alloc_xenoprof_struct(
    21.4      bufsize += (max_samples - 1) * i;
    21.5      npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;
    21.6  
    21.7 -    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages));
    21.8 +    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
    21.9      if ( d->xenoprof->rawbuf == NULL )
   21.10      {
   21.11          xfree(d->xenoprof);
    22.1 --- a/xen/common/xmalloc_tlsf.c	Wed Jan 28 13:06:45 2009 +0900
    22.2 +++ b/xen/common/xmalloc_tlsf.c	Fri Jan 30 10:54:13 2009 +0900
    22.3 @@ -300,7 +300,7 @@ struct xmem_pool *xmem_pool_create(
    22.4      pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    22.5      pool_order = get_order_from_bytes(pool_bytes);
    22.6  
    22.7 -    pool = (void *)alloc_xenheap_pages(pool_order);
    22.8 +    pool = (void *)alloc_xenheap_pages(pool_order, 0);
    22.9      if ( pool == NULL )
   22.10          return NULL;
   22.11      memset(pool, 0, pool_bytes);
   22.12 @@ -505,12 +505,12 @@ static struct xmem_pool *xenpool;
   22.13  static void *xmalloc_pool_get(unsigned long size)
   22.14  {
   22.15      ASSERT(size == PAGE_SIZE);
   22.16 -    return alloc_xenheap_pages(0);
   22.17 +    return alloc_xenheap_page();
   22.18  }
   22.19  
   22.20  static void xmalloc_pool_put(void *p)
   22.21  {
   22.22 -    free_xenheap_pages(p,0);
   22.23 +    free_xenheap_page(p);
   22.24  }
   22.25  
   22.26  static void *xmalloc_whole_pages(unsigned long size)
   22.27 @@ -518,7 +518,7 @@ static void *xmalloc_whole_pages(unsigne
   22.28      struct bhdr *b;
   22.29      unsigned int pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);
   22.30  
   22.31 -    b = alloc_xenheap_pages(pageorder);
   22.32 +    b = alloc_xenheap_pages(pageorder, 0);
   22.33      if ( b == NULL )
   22.34          return NULL;
   22.35  
    23.1 --- a/xen/drivers/char/console.c	Wed Jan 28 13:06:45 2009 +0900
    23.2 +++ b/xen/drivers/char/console.c	Fri Jan 30 10:54:13 2009 +0900
    23.3 @@ -885,7 +885,7 @@ static int __init debugtrace_init(void)
    23.4          return 0;
    23.5  
    23.6      order = get_order_from_bytes(bytes);
    23.7 -    debugtrace_buf = alloc_xenheap_pages(order);
    23.8 +    debugtrace_buf = alloc_xenheap_pages(order, 0);
    23.9      ASSERT(debugtrace_buf != NULL);
   23.10  
   23.11      memset(debugtrace_buf, '\0', bytes);
    24.1 --- a/xen/drivers/char/serial.c	Wed Jan 28 13:06:45 2009 +0900
    24.2 +++ b/xen/drivers/char/serial.c	Fri Jan 30 10:54:13 2009 +0900
    24.3 @@ -495,7 +495,7 @@ void serial_async_transmit(struct serial
    24.4      BUG_ON(!port->driver->tx_empty);
    24.5      if ( port->txbuf == NULL )
    24.6          port->txbuf = alloc_xenheap_pages(
    24.7 -            get_order_from_bytes(serial_txbufsz));
    24.8 +            get_order_from_bytes(serial_txbufsz), 0);
    24.9  }
   24.10  
   24.11  /*
    25.1 --- a/xen/drivers/passthrough/vtd/dmar.c	Wed Jan 28 13:06:45 2009 +0900
    25.2 +++ b/xen/drivers/passthrough/vtd/dmar.c	Fri Jan 30 10:54:13 2009 +0900
    25.3 @@ -506,6 +506,15 @@ static int __init acpi_parse_dmar(struct
    25.4      return ret;
    25.5  }
    25.6  
    25.7 +#ifdef CONFIG_X86
    25.8 +#include <asm/tboot.h>
     25.9 +/* ACPI tables may not be DMA protected by tboot, so use the DMAR copy */
    25.10 +/* that SINIT saved in SinitMleData in the TXT heap (which is DMA protected). */
   25.11 +#define parse_dmar_table(h) tboot_parse_dmar_table(h)
   25.12 +#else
   25.13 +#define parse_dmar_table(h) acpi_table_parse(ACPI_SIG_DMAR, h)
   25.14 +#endif
   25.15 +
   25.16  int acpi_dmar_init(void)
   25.17  {
   25.18      int rc;
   25.19 @@ -519,7 +528,7 @@ int acpi_dmar_init(void)
   25.20      if ( !iommu_enabled )
   25.21          goto fail;
   25.22  
   25.23 -    rc = acpi_table_parse(ACPI_SIG_DMAR, acpi_parse_dmar);
   25.24 +    rc = parse_dmar_table(acpi_parse_dmar);
   25.25      if ( rc )
   25.26          goto fail;
   25.27  
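
On x86 the DMAR table is no longer parsed out of the live ACPI tables, which
tboot may leave unprotected against DMA: parse_dmar_table() routes through
tboot_parse_dmar_table() (declared in the asm-x86/tboot.h hunk below), which
is expected to hand the handler the copy SINIT saved in the DMA-protected
TXT heap. A hedged sketch of the shape such a wrapper can take; the fallback
path and the get_tboot_dmar_copy() helper are assumptions, not code from
this changeset:

    #include <xen/acpi.h>     /* acpi_table_handler, acpi_table_parse() */
    #include <xen/errno.h>
    #include <asm/tboot.h>    /* tboot_in_measured_env() */

    int tboot_parse_dmar_table(acpi_table_handler dmar_handler)
    {
        struct acpi_table_header *dmar;

        /* Outside a measured launch there is no protected copy;
         * behave like the non-x86 path. */
        if ( !tboot_in_measured_env() )
            return acpi_table_parse(ACPI_SIG_DMAR, dmar_handler);

        /* Hypothetical helper: locate SINIT's DMAR copy in the
         * TXT heap. */
        dmar = get_tboot_dmar_copy();
        if ( dmar == NULL )
            return -ENODEV;

        return dmar_handler(dmar);
    }
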
    26.1 --- a/xen/include/asm-x86/e820.h	Wed Jan 28 13:06:45 2009 +0900
    26.2 +++ b/xen/include/asm-x86/e820.h	Fri Jan 30 10:54:13 2009 +0900
    26.3 @@ -24,6 +24,9 @@ struct e820map {
    26.4  };
    26.5  
    26.6  extern int reserve_e820_ram(struct e820map *e820, uint64_t s, uint64_t e);
    26.7 +extern int e820_change_range_type(
    26.8 +    struct e820map *e820, uint64_t s, uint64_t e,
    26.9 +    uint32_t orig_type, uint32_t new_type);
   26.10  extern unsigned long init_e820(const char *, struct e820entry *, int *);
   26.11  extern struct e820map e820;
   26.12  
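
e820_change_range_type() is a new helper for retyping a region of the e820
map: it rewrites [s,e) from orig_type to new_type, presumably splitting
entries where the range does not fall on an entry boundary. A hypothetical
caller, hiding a RAM range from the heap allocator; the nonzero-on-success
return convention is an assumption carried over from the neighbouring
reserve_e820_ram():

    /* Assumed convention: nonzero return means success. */
    if ( !e820_change_range_type(&e820, start, end,
                                 E820_RAM, E820_RESERVED) )
        printk("e820: cannot re-type [%"PRIx64", %"PRIx64")\n",
               start, end);
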
    27.1 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Wed Jan 28 13:06:45 2009 +0900
    27.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Jan 30 10:54:13 2009 +0900
    27.3 @@ -140,7 +140,7 @@ static inline void free_amd_iommu_pgtabl
    27.4  static inline void* __alloc_amd_iommu_tables(int order)
    27.5  {
    27.6      void *buf;
    27.7 -    buf = alloc_xenheap_pages(order);
    27.8 +    buf = alloc_xenheap_pages(order, 0);
    27.9      return buf;
   27.10  }
   27.11  
    28.1 --- a/xen/include/asm-x86/mm.h	Wed Jan 28 13:06:45 2009 +0900
    28.2 +++ b/xen/include/asm-x86/mm.h	Fri Jan 30 10:54:13 2009 +0900
    28.3 @@ -31,7 +31,7 @@ struct page_info
    28.4          /* Page is in use: ((count_info & PGC_count_mask) != 0). */
    28.5          struct {
    28.6              /* Owner of this page (NULL if page is anonymous). */
    28.7 -            unsigned long _domain; /* pickled format */
    28.8 +            u32 _domain; /* pickled format */
    28.9              /* Type reference count and various PGT_xxx flags and fields. */
   28.10              unsigned long type_info;
   28.11          } inuse;
   28.12 @@ -173,8 +173,11 @@ struct page_info
   28.13  /* OOS fixup entries */
   28.14  #define SHADOW_OOS_FIXUPS 2
   28.15  
   28.16 -#define page_get_owner(_p)    ((struct domain *)(_p)->u.inuse._domain)
   28.17 -#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = (unsigned long)(_d))
   28.18 +#define page_get_owner(_p)                                              \
   28.19 +    ((struct domain *)((_p)->u.inuse._domain ?                          \
   28.20 +                       mfn_to_virt((_p)->u.inuse._domain) : NULL))
   28.21 +#define page_set_owner(_p,_d)                                           \
   28.22 +    ((_p)->u.inuse._domain = (_d) ? virt_to_mfn(_d) : 0)
   28.23  
   28.24  #define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))
   28.25  #define vaddr_get_owner(va)   (page_get_owner(virt_to_page((va))))
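
Shrinking _domain from unsigned long to u32 rests on a new pickled format:
instead of the owner's virtual address, the field holds the MFN of the page
containing the domain structure (0 standing for "no owner"), and the
accessors translate with virt_to_mfn()/mfn_to_virt(). The round trip only
preserves the pointer if the domain structure is page-aligned and its MFN
fits in 32 bits, which is presumably what the arch-specific
alloc_domain_struct() added in the xen/include/xen/domain.h hunk below must
guarantee. The pickling in miniature:

    /* page_set_owner(pg, d): store the MFN of d's page. */
    pg->u.inuse._domain = virt_to_mfn(d);

    /* page_get_owner(pg): map the stored MFN back to a pointer;
     * this equals d only because d is page-aligned. */
    d = mfn_to_virt(pg->u.inuse._domain);
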
    29.1 --- a/xen/include/asm-x86/tboot.h	Wed Jan 28 13:06:45 2009 +0900
    29.2 +++ b/xen/include/asm-x86/tboot.h	Fri Jan 30 10:54:13 2009 +0900
    29.3 @@ -37,7 +37,13 @@
    29.4  #ifndef __TBOOT_H__
    29.5  #define __TBOOT_H__
    29.6  
    29.7 -typedef struct __attribute__ ((__packed__)) {
    29.8 +#include <xen/acpi.h>
    29.9 +
   29.10 +#ifndef __packed
   29.11 +#define __packed   __attribute__ ((packed))
   29.12 +#endif
   29.13 +
   29.14 +typedef struct __packed {
   29.15    uint32_t    data1;
   29.16    uint16_t    data2;
   29.17    uint16_t    data3;
   29.18 @@ -47,31 +53,47 @@ typedef struct __attribute__ ((__packed_
   29.19  
   29.20  /* used to communicate between tboot and the launched kernel (i.e. Xen) */
   29.21  
   29.22 -typedef struct __attribute__ ((__packed__)) {
   29.23 -    uint16_t pm1a_cnt;
   29.24 -    uint16_t pm1b_cnt;
   29.25 -    uint16_t pm1a_evt;
   29.26 -    uint16_t pm1b_evt;
   29.27 +#define MAX_TB_MAC_REGIONS      32
   29.28 +typedef struct __packed {
   29.29 +    uint64_t  start;
   29.30 +    uint64_t  end;
   29.31 +} tboot_mac_region_t;
   29.32 +
   29.33 +/* GAS - Generic Address Structure (ACPI 2.0+) */
   29.34 +typedef struct __packed {
    29.35 +    uint8_t  space_id;
    29.36 +    uint8_t  bit_width;
    29.37 +    uint8_t  bit_offset;
    29.38 +    uint8_t  access_width;
    29.39 +    uint64_t address;
   29.40 +} tboot_acpi_generic_address_t;
   29.41 +
   29.42 +typedef struct __packed {
   29.43 +    tboot_acpi_generic_address_t pm1a_cnt_blk;
   29.44 +    tboot_acpi_generic_address_t pm1b_cnt_blk;
   29.45 +    tboot_acpi_generic_address_t pm1a_evt_blk;
   29.46 +    tboot_acpi_generic_address_t pm1b_evt_blk;
   29.47      uint16_t pm1a_cnt_val;
   29.48      uint16_t pm1b_cnt_val;
   29.49 -} tboot_acpi_sleep_info;
   29.50 +    uint64_t wakeup_vector;
   29.51 +    uint32_t vector_width;
   29.52 +    uint64_t kernel_s3_resume_vector;
   29.53 +} tboot_acpi_sleep_info_t;
   29.54  
   29.55 -typedef struct __attribute__ ((__packed__)) {
   29.56 -    /* version 0x01+ fields: */
   29.57 +typedef struct __packed {
   29.58 +    /* version 3+ fields: */
   29.59      uuid_t    uuid;              /* {663C8DFF-E8B3-4b82-AABF-19EA4D057A08} */
   29.60 -    uint32_t  version;           /* Version number: 0x01, 0x02, ... */
    29.61 +    uint32_t  version;           /* Version number; currently 3 */
   29.62      uint32_t  log_addr;          /* physical addr of tb_log_t log */
   29.63 -    uint32_t  shutdown_entry32;  /* entry point for tboot shutdown from 32b */
   29.64 -    uint32_t  shutdown_entry64;  /* entry point for tboot shutdown from 64b */
   29.65 +    uint32_t  shutdown_entry;    /* entry point for tboot shutdown */
   29.66      uint32_t  shutdown_type;     /* type of shutdown (TB_SHUTDOWN_*) */
   29.67 -    uint32_t  s3_tb_wakeup_entry;/* entry point for tboot s3 wake up */
   29.68 -    uint32_t  s3_k_wakeup_entry; /* entry point for xen s3 wake up */
   29.69 -    tboot_acpi_sleep_info
   29.70 +    tboot_acpi_sleep_info_t
   29.71                acpi_sinfo;        /* where kernel put acpi sleep info in Sx */
   29.72 -    uint8_t   reserved[52];      /* this pad is for compat with old field */
   29.73 -    /* version 0x02+ fields: */
   29.74      uint32_t  tboot_base;        /* starting addr for tboot */
   29.75      uint32_t  tboot_size;        /* size of tboot */
    29.76 +    uint8_t   num_mac_regions;   /* number of memory regions to MAC on S3 */
    29.77 +                                 /* contiguous memory regions to MAC on S3 */
   29.78 +    tboot_mac_region_t mac_regions[MAX_TB_MAC_REGIONS];
   29.79  } tboot_shared_t;
   29.80  
   29.81  #define TB_SHUTDOWN_REBOOT      0
   29.82 @@ -89,6 +111,8 @@ extern tboot_shared_t *g_tboot_shared;
   29.83  void tboot_probe(void);
   29.84  void tboot_shutdown(uint32_t shutdown_type);
   29.85  int tboot_in_measured_env(void);
   29.86 +int tboot_protect_mem_regions(void);
   29.87 +int tboot_parse_dmar_table(acpi_table_handler dmar_handler);
   29.88  
   29.89  #endif /* __TBOOT_H__ */
   29.90  
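
tboot_shared_t moves to version 3 of the layout: the separate 32- and 64-bit
shutdown entry points collapse into a single shutdown_entry, the S3 wakeup
vectors migrate into the extended tboot_acpi_sleep_info_t (which now carries
full ACPI generic address structures rather than raw port numbers), and a
table of up to MAX_TB_MAC_REGIONS ranges tells tboot which memory regions to
MAC across S3. The layout is not compatible with versions 1 and 2, so a
consumer has to gate on the version field; a hedged sketch of such a check
(the exact policy in tboot_probe() is not shown by this changeset):

    if ( g_tboot_shared->version < 3 )
    {
        printk("tboot: unsupported tboot_shared_t version %u\n",
               g_tboot_shared->version);
        g_tboot_shared = NULL;  /* ignore the incompatible layout */
    }
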
    30.1 --- a/xen/include/xen/domain.h	Wed Jan 28 13:06:45 2009 +0900
    30.2 +++ b/xen/include/xen/domain.h	Fri Jan 30 10:54:13 2009 +0900
    30.3 @@ -23,6 +23,10 @@ void getdomaininfo(struct domain *d, str
    30.4   * Arch-specifics.
    30.5   */
    30.6  
    30.7 +/* Allocate/free a domain structure. */
    30.8 +struct domain *alloc_domain_struct(void);
    30.9 +void free_domain_struct(struct domain *d);
   30.10 +
   30.11  /* Allocate/free a VCPU structure. */
   30.12  struct vcpu *alloc_vcpu_struct(void);
   30.13  void free_vcpu_struct(struct vcpu *v);
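
Domain-structure allocation becomes arch-specific, mirroring the existing
alloc_vcpu_struct()/free_vcpu_struct() pair. The likely motivation is the
asm-x86/mm.h hunk above: x86 must place struct domain where
page_set_owner() can pickle its address into the 32-bit _domain field. A
minimal sketch of an x86-style implementation under that assumption, not
the changeset's actual code:

    #include <xen/lib.h>      /* BUILD_BUG_ON() */
    #include <xen/mm.h>       /* alloc_xenheap_pages(), MEMF_bits() */
    #include <xen/sched.h>    /* struct domain */
    #include <xen/string.h>   /* memset() */

    struct domain *alloc_domain_struct(void)
    {
        struct domain *d;

        /* A whole page keeps the pointer page-aligned, so it
         * survives the virt_to_mfn()/mfn_to_virt() round trip. */
        BUILD_BUG_ON(sizeof(struct domain) > PAGE_SIZE);

        /* 32 + PAGE_SHIFT address bits keep the MFN itself
         * representable in the 32-bit _domain field. */
        d = alloc_xenheap_pages(0, MEMF_bits(32 + PAGE_SHIFT));
        if ( d != NULL )
            memset(d, 0, sizeof(*d));
        return d;
    }

    void free_domain_struct(struct domain *d)
    {
        free_xenheap_pages(d, 0);
    }
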
    31.1 --- a/xen/include/xen/grant_table.h	Wed Jan 28 13:06:45 2009 +0900
    31.2 +++ b/xen/include/xen/grant_table.h	Fri Jan 30 10:54:13 2009 +0900
    31.3 @@ -147,7 +147,4 @@ nr_active_grant_frames(struct grant_tabl
    31.4      return num_act_frames_from_sha_frames(nr_grant_frames(gt));
    31.5  }
    31.6  
    31.7 -int
    31.8 -gnttab_is_granted(struct domain *d, xen_pfn_t gfn, int order);
    31.9 -
   31.10  #endif /* __XEN_GRANT_TABLE_H__ */
    32.1 --- a/xen/include/xen/mm.h	Wed Jan 28 13:06:45 2009 +0900
    32.2 +++ b/xen/include/xen/mm.h	Fri Jan 30 10:54:13 2009 +0900
    32.3 @@ -45,9 +45,9 @@ void end_boot_allocator(void);
    32.4  
    32.5  /* Xen suballocator. These functions are interrupt-safe. */
    32.6  void init_xenheap_pages(paddr_t ps, paddr_t pe);
    32.7 -void *alloc_xenheap_pages(unsigned int order);
    32.8 +void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
    32.9  void free_xenheap_pages(void *v, unsigned int order);
   32.10 -#define alloc_xenheap_page() (alloc_xenheap_pages(0))
   32.11 +#define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
   32.12  #define free_xenheap_page(v) (free_xenheap_pages(v,0))
   32.13  
   32.14  /* Domain suballocator. These functions are *not* interrupt-safe.*/