ia64/xen-unstable

changeset 7989:c7508abc5b6b

Add -Wdeclaration-after-statement to the Xen and tools build.
Fix the compile errors that result from this.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Nov 22 18:44:08 2005 +0100 (2005-11-22)
parents f7bee3cb1bf1
children 393256b2ead0
files Config.mk tools/libxc/xc_private.h tools/libxc/xg_private.h tools/xenstore/xenstore_client.c xen/arch/x86/audit.c xen/arch/x86/dm/vmx_vioapic.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/vmx.c xen/arch/x86/x86_32/traps.c xen/common/sched_sedf.c xen/include/asm-x86/shadow.h
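
For background (an illustrative sketch, not part of the changeset): -Wdeclaration-after-statement makes gcc warn whenever a declaration appears after a statement inside a block, i.e. it enforces C89-style declaration placement, which is why the hunks below mostly hoist declarations to the top of their enclosing block. A minimal example, with bad() and good() as made-up function names:

    /* Warned about once -Wdeclaration-after-statement is in CFLAGS: */
    int bad(int x)
    {
        x += 1;          /* a statement...                        */
        int y = x * 2;   /* ...followed by a declaration: warning */
        return y;
    }

    /* Accepted: declare first, assign later. */
    int good(int x)
    {
        int y;

        x += 1;
        y = x * 2;
        return y;
    }
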
line diff
     1.1 --- a/Config.mk	Tue Nov 22 18:21:22 2005 +0100
     1.2 +++ b/Config.mk	Tue Nov 22 18:44:08 2005 +0100
     1.3 @@ -8,6 +8,7 @@ XEN_TARGET_X86_PAE  ?= n
     1.4  # Tools to run on system hosting the build
     1.5  HOSTCC     = gcc
     1.6  HOSTCFLAGS = -Wall -Werror -Wstrict-prototypes -O2 -fomit-frame-pointer
     1.7 +HOSTCFLAGS += -Wdeclaration-after-statement
     1.8  
     1.9  AS         = $(CROSS_COMPILE)as
    1.10  LD         = $(CROSS_COMPILE)ld
    1.11 @@ -38,6 +39,8 @@ EXTRA_INCLUDES += $(EXTRA_PREFIX)/includ
    1.12  EXTRA_LIB += $(EXTRA_PREFIX)/$(LIBDIR)
    1.13  endif
    1.14  
    1.15 +CFLAGS += -Wdeclaration-after-statement 
    1.16 +
    1.17  LDFLAGS += $(foreach i, $(EXTRA_LIB), -L$(i)) 
    1.18  CFLAGS += $(foreach i, $(EXTRA_INCLUDES), -I$(i))
    1.19  
     2.1 --- a/tools/libxc/xc_private.h	Tue Nov 22 18:21:22 2005 +0100
     2.2 +++ b/tools/libxc/xc_private.h	Tue Nov 22 18:44:08 2005 +0100
     2.3 @@ -21,9 +21,8 @@
     2.4     reason, we must zero the privcmd_hypercall_t or dom0_op_t instance before a
     2.5     call, if using valgrind.  */
     2.6  #ifdef VALGRIND
     2.7 -#define DECLARE_HYPERCALL privcmd_hypercall_t hypercall; \
     2.8 -  memset(&hypercall, 0, sizeof(hypercall))
     2.9 -#define DECLARE_DOM0_OP dom0_op_t op; memset(&op, 0, sizeof(op))
    2.10 +#define DECLARE_HYPERCALL privcmd_hypercall_t hypercall = { 0 }
    2.11 +#define DECLARE_DOM0_OP dom0_op_t op = { 0 }
    2.12  #else
    2.13  #define DECLARE_HYPERCALL privcmd_hypercall_t hypercall
    2.14  #define DECLARE_DOM0_OP dom0_op_t op
     3.1 --- a/tools/libxc/xg_private.h	Tue Nov 22 18:21:22 2005 +0100
     3.2 +++ b/tools/libxc/xg_private.h	Tue Nov 22 18:44:08 2005 +0100
     3.3 @@ -20,7 +20,7 @@
     3.4     reason, we must zero the dom0_op_t instance before a call, if using
     3.5     valgrind.  */
     3.6  #ifdef VALGRIND
     3.7 -#define DECLARE_DOM0_OP dom0_op_t op; memset(&op, 0, sizeof(op))
     3.8 +#define DECLARE_DOM0_OP dom0_op_t op = { 0 }
     3.9  #else
    3.10  #define DECLARE_DOM0_OP dom0_op_t op
    3.11  #endif
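
The DECLARE_HYPERCALL/DECLARE_DOM0_OP changes above replace a memset statement with a { 0 } initializer: under valgrind the structure still starts out zeroed, but the macro now expands to a pure declaration, so callers can keep declaring other locals after it without tripping the new warning. A hedged sketch, where op_t, DECLARE_OP_OLD, DECLARE_OP_NEW and use_it are stand-ins rather than the real Xen names:

    #include <string.h>

    typedef struct { int cmd; long arg; } op_t;

    /* Old style: a declaration plus a memset statement -- any local declared
     * after this in the caller would follow a statement and trigger the warning. */
    #define DECLARE_OP_OLD  op_t op; memset(&op, 0, sizeof(op))

    /* New style: the initializer zeroes the members, but the expansion stays a
     * single declaration. */
    #define DECLARE_OP_NEW  op_t op = { 0 }

    int use_it(void)
    {
        DECLARE_OP_NEW;   /* still a declaration... */
        int later = 1;    /* ...so this declaration is fine under the warning */

        op.cmd = later;
        return op.cmd;
    }
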
     4.1 --- a/tools/xenstore/xenstore_client.c	Tue Nov 22 18:21:22 2005 +0100
     4.2 +++ b/tools/xenstore/xenstore_client.c	Tue Nov 22 18:44:08 2005 +0100
     4.3 @@ -109,7 +109,7 @@ perform(int optind, int argc, char **arg
     4.4             necessary.
     4.5          */
     4.6  
     4.7 -        char *path = argv[optind];
     4.8 +        char *slash, *path = argv[optind];
     4.9  
    4.10          if (tidy) {
    4.11              /* Copy path, because we can't modify argv because we will need it
    4.12 @@ -123,7 +123,7 @@ perform(int optind, int argc, char **arg
    4.13                  return 1;
    4.14              }
    4.15  
    4.16 -            char *slash = strrchr(p, '/');
    4.17 +            slash = strrchr(p, '/');
    4.18              if (slash) {
    4.19                  char *val;
    4.20                  *slash = '\0';
     5.1 --- a/xen/arch/x86/audit.c	Tue Nov 22 18:21:22 2005 +0100
     5.2 +++ b/xen/arch/x86/audit.c	Tue Nov 22 18:44:08 2005 +0100
     5.3 @@ -55,10 +55,11 @@ int audit_adjust_pgtables(struct domain 
     5.4  
     5.5      void _adjust(struct pfn_info *page, int adjtype ADJUST_EXTRA_ARGS)
     5.6      {
     5.7 +        int count;
     5.8 +
     5.9          if ( adjtype )
    5.10          {
    5.11 -            // adjust the type count
    5.12 -            //
    5.13 +            /* adjust the type count */
    5.14              int tcount = page->u.inuse.type_info & PGT_count_mask;
    5.15              tcount += dir;
    5.16              ttot++;
    5.17 @@ -92,10 +93,8 @@ int audit_adjust_pgtables(struct domain 
    5.18                  page->u.inuse.type_info += dir;
    5.19          }
    5.20  
    5.21 -        // adjust the general count
    5.22 -        //
    5.23 -        int count = page->count_info & PGC_count_mask;
    5.24 -        count += dir;
    5.25 +        /* adjust the general count */
    5.26 +        count = (page->count_info & PGC_count_mask) + dir;
    5.27          ctot++;
    5.28  
    5.29          if ( count < 0 )
    5.30 @@ -124,6 +123,7 @@ int audit_adjust_pgtables(struct domain 
    5.31      {
    5.32          unsigned long *pt = map_domain_page(mfn);
    5.33          int i;
    5.34 +        u32 page_type;
    5.35  
    5.36          for ( i = 0; i < l2limit; i++ )
    5.37          {
    5.38 @@ -147,8 +147,7 @@ int audit_adjust_pgtables(struct domain 
    5.39                              continue;
    5.40                          }
    5.41  
    5.42 -                        u32 page_type = l1page->u.inuse.type_info & PGT_type_mask;
    5.43 -
    5.44 +                        page_type = l1page->u.inuse.type_info & PGT_type_mask;
    5.45                          if ( page_type != PGT_l1_shadow )
    5.46                          {
    5.47                              printk("Audit %d: [Shadow L2 mfn=%lx i=%x] "
    5.48 @@ -174,8 +173,7 @@ int audit_adjust_pgtables(struct domain 
    5.49                              continue;
    5.50                          }
    5.51  
    5.52 -                        u32 page_type = l1page->u.inuse.type_info & PGT_type_mask;
    5.53 -
    5.54 +                        page_type = l1page->u.inuse.type_info & PGT_type_mask;
    5.55                          if ( page_type == PGT_l2_page_table )
    5.56                          {
    5.57                              printk("Audit %d: [%x] Found %s Linear PT "
    5.58 @@ -741,6 +739,7 @@ void _audit_domain(struct domain *d, int
    5.59      while ( list_ent != &d->page_list )
    5.60      {
    5.61          u32 page_type;
    5.62 +        unsigned long pfn;
    5.63  
    5.64          page = list_entry(list_ent, struct pfn_info, list);
    5.65          mfn = page_to_pfn(page);
    5.66 @@ -797,7 +796,7 @@ void _audit_domain(struct domain *d, int
    5.67                  printk("out of sync page mfn=%lx is not a page table\n", mfn);
    5.68                  errors++;
    5.69              }
    5.70 -            unsigned long pfn = __mfn_to_gpfn(d, mfn);
    5.71 +            pfn = __mfn_to_gpfn(d, mfn);
    5.72              if ( !__shadow_status(d, pfn, PGT_snapshot) )
    5.73              {
    5.74                  printk("out of sync page mfn=%lx doesn't have a snapshot\n",
     6.1 --- a/xen/arch/x86/dm/vmx_vioapic.c	Tue Nov 22 18:21:22 2005 +0100
     6.2 +++ b/xen/arch/x86/dm/vmx_vioapic.c	Tue Nov 22 18:44:08 2005 +0100
     6.3 @@ -52,8 +52,6 @@ static void ioapic_enable(vmx_vioapic_t 
     6.4  
     6.5  static void ioapic_dump_redir(vmx_vioapic_t *s, uint8_t entry)
     6.6  {
     6.7 -    ASSERT(s);
     6.8 -
     6.9      RedirStatus redir = s->redirtbl[entry];
    6.10  
    6.11      VMX_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_dump_redir "
     7.1 --- a/xen/arch/x86/mm.c	Tue Nov 22 18:21:22 2005 +0100
     7.2 +++ b/xen/arch/x86/mm.c	Tue Nov 22 18:44:08 2005 +0100
     7.3 @@ -521,10 +521,10 @@ get_page_from_l3e(
     7.4      l3_pgentry_t l3e, unsigned long pfn,
     7.5      struct domain *d, unsigned long vaddr)
     7.6  {
     7.7 -    ASSERT( !shadow_mode_refcounts(d) );
     7.8 -
     7.9      int rc;
    7.10  
    7.11 +    ASSERT(!shadow_mode_refcounts(d));
    7.12 +
    7.13      if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
    7.14          return 1;
    7.15  
    7.16 @@ -1880,19 +1880,18 @@ int do_mmuext_op(
    7.17  
    7.18          case MMUEXT_SET_LDT:
    7.19          {
    7.20 +            unsigned long ptr  = op.arg1.linear_addr;
    7.21 +            unsigned long ents = op.arg2.nr_ents;
    7.22 +
    7.23              if ( shadow_mode_external(d) )
    7.24              {
    7.25                  MEM_LOG("ignoring SET_LDT hypercall from external "
    7.26                          "domain %u", d->domain_id);
    7.27                  okay = 0;
    7.28 -                break;
    7.29              }
    7.30 -
    7.31 -            unsigned long ptr  = op.arg1.linear_addr;
    7.32 -            unsigned long ents = op.arg2.nr_ents;
    7.33 -            if ( ((ptr & (PAGE_SIZE-1)) != 0) || 
    7.34 -                 (ents > 8192) ||
    7.35 -                 !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
    7.36 +            else if ( ((ptr & (PAGE_SIZE-1)) != 0) || 
    7.37 +                      (ents > 8192) ||
    7.38 +                      !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
    7.39              {
    7.40                  okay = 0;
    7.41                  MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
     8.1 --- a/xen/arch/x86/shadow.c	Tue Nov 22 18:21:22 2005 +0100
     8.2 +++ b/xen/arch/x86/shadow.c	Tue Nov 22 18:44:08 2005 +0100
     8.3 @@ -207,6 +207,7 @@ alloc_shadow_page(struct domain *d,
     8.4      struct pfn_info *page;
     8.5      unsigned long smfn;
     8.6      int pin = 0;
     8.7 +    void *l1, *lp;
     8.8  
     8.9      // Currently, we only keep pre-zero'ed pages around for use as L1's...
    8.10      // This will change.  Soon.
    8.11 @@ -232,19 +233,19 @@ alloc_shadow_page(struct domain *d,
    8.12                  if (!page)
    8.13                      goto no_shadow_page;
    8.14  
    8.15 -                void *l1_0 = map_domain_page(page_to_pfn(page));
    8.16 -                memset(l1_0, 0, PAGE_SIZE);
    8.17 -                unmap_domain_page(l1_0);
    8.18 -
    8.19 -                void *l1_1 = map_domain_page(page_to_pfn(page+1));
    8.20 -                memset(l1_1, 0, PAGE_SIZE);
    8.21 -                unmap_domain_page(l1_1);
    8.22 +                l1 = map_domain_page(page_to_pfn(page));
    8.23 +                memset(l1, 0, PAGE_SIZE);
    8.24 +                unmap_domain_page(l1);
    8.25 +
    8.26 +                l1 = map_domain_page(page_to_pfn(page+1));
    8.27 +                memset(l1, 0, PAGE_SIZE);
    8.28 +                unmap_domain_page(l1);
    8.29  #else
    8.30                  page = alloc_domheap_page(NULL);
    8.31                  if (!page)
    8.32                      goto no_shadow_page;
    8.33  
    8.34 -                void *l1 = map_domain_page(page_to_pfn(page));
    8.35 +                l1 = map_domain_page(page_to_pfn(page));
    8.36                  memset(l1, 0, PAGE_SIZE);
    8.37                  unmap_domain_page(l1);
    8.38  #endif
    8.39 @@ -255,7 +256,7 @@ alloc_shadow_page(struct domain *d,
    8.40                  if (!page)
    8.41                      goto no_shadow_page;
    8.42  
    8.43 -                void *l1 = map_domain_page(page_to_pfn(page));
    8.44 +                l1 = map_domain_page(page_to_pfn(page));
    8.45                  memset(l1, 0, PAGE_SIZE);
    8.46                  unmap_domain_page(l1);
    8.47              }
    8.48 @@ -279,7 +280,7 @@ alloc_shadow_page(struct domain *d,
    8.49          if (!page)
    8.50              goto no_shadow_page;
    8.51  
    8.52 -        void *lp = map_domain_page(page_to_pfn(page));
    8.53 +        lp = map_domain_page(page_to_pfn(page));
    8.54          memset(lp, 0, PAGE_SIZE);
    8.55          unmap_domain_page(lp);
    8.56      }
    8.57 @@ -588,9 +589,11 @@ static void shadow_map_l1_into_current_l
    8.58      }
    8.59  
    8.60  #ifndef NDEBUG
    8.61 -    l2_pgentry_t old_sl2e;
    8.62 -    __shadow_get_l2e(v, va, &old_sl2e);
    8.63 -    ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
    8.64 +    {
    8.65 +        l2_pgentry_t old_sl2e;
    8.66 +        __shadow_get_l2e(v, va, &old_sl2e);
    8.67 +        ASSERT(!(l2e_get_flags(old_sl2e) & _PAGE_PRESENT));
    8.68 +    }
    8.69  #endif
    8.70  
    8.71  #if CONFIG_PAGING_LEVELS >=3
    8.72 @@ -952,14 +955,16 @@ static struct out_of_sync_entry *
    8.73      ASSERT(pfn_valid(mfn));
    8.74  
    8.75  #ifndef NDEBUG
    8.76 -    u32 type = page->u.inuse.type_info & PGT_type_mask;
    8.77 -    if ( shadow_mode_refcounts(d) )
    8.78      {
    8.79 -        ASSERT(type == PGT_writable_page);
    8.80 -    }
    8.81 -    else
    8.82 -    {
    8.83 -        ASSERT(type && (type < PGT_l4_page_table));
    8.84 +        u32 type = page->u.inuse.type_info & PGT_type_mask;
    8.85 +        if ( shadow_mode_refcounts(d) )
    8.86 +        {
    8.87 +            ASSERT(type == PGT_writable_page);
    8.88 +        }
    8.89 +        else
    8.90 +        {
    8.91 +            ASSERT(type && (type < PGT_l4_page_table));
    8.92 +        }
    8.93      }
    8.94  #endif
    8.95  
    8.96 @@ -1438,6 +1443,8 @@ static int resync_all(struct domain *d, 
    8.97      int need_flush = 0, external = shadow_mode_external(d);
    8.98      int unshadow;
    8.99      int changed;
   8.100 +    u32 min_max_shadow, min_max_snapshot;
   8.101 +    int min_shadow, max_shadow, min_snapshot, max_snapshot;
   8.102  
   8.103      ASSERT(shadow_lock_is_acquired(d));
   8.104  
   8.105 @@ -1466,7 +1473,7 @@ static int resync_all(struct domain *d, 
   8.106                  continue;
   8.107          }
   8.108  
   8.109 -       FSH_LOG("resyncing t=%08x gpfn=%lx gmfn=%lx smfn=%lx snapshot_mfn=%lx",
   8.110 +        FSH_LOG("resyncing t=%08x gpfn=%lx gmfn=%lx smfn=%lx snapshot_mfn=%lx",
   8.111                  stype, entry->gpfn, entry->gmfn, smfn, entry->snapshot_mfn);
   8.112  
   8.113          // Compare guest's new contents to its snapshot, validating
   8.114 @@ -1482,16 +1489,16 @@ static int resync_all(struct domain *d, 
   8.115  
   8.116          unshadow = 0;
   8.117  
   8.118 -        u32 min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
   8.119 -        int min_shadow = SHADOW_MIN(min_max_shadow);
   8.120 -        int max_shadow = SHADOW_MAX(min_max_shadow);
   8.121 -
   8.122 -        u32 min_max_snapshot =
   8.123 -          pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
   8.124 -        int min_snapshot = SHADOW_MIN(min_max_snapshot);
   8.125 -        int max_snapshot = SHADOW_MAX(min_max_snapshot);
   8.126 -
   8.127 -        switch ( stype ) {
   8.128 +        min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
   8.129 +        min_shadow     = SHADOW_MIN(min_max_shadow);
   8.130 +        max_shadow     = SHADOW_MAX(min_max_shadow);
   8.131 +
   8.132 +        min_max_snapshot= pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
   8.133 +        min_snapshot    = SHADOW_MIN(min_max_snapshot);
   8.134 +        max_snapshot    = SHADOW_MAX(min_max_snapshot);
   8.135 +
   8.136 +        switch ( stype )
   8.137 +        {
   8.138          case PGT_l1_shadow:
   8.139          {
   8.140              guest_l1_pgentry_t *guest1 = guest;
   8.141 @@ -1680,9 +1687,9 @@ static int resync_all(struct domain *d, 
   8.142              changed = 0;
   8.143              for ( i = 0; i < GUEST_ROOT_PAGETABLE_ENTRIES; i++ )
   8.144              {
   8.145 +                guest_root_pgentry_t new_root_e = guest_root[i];
   8.146                  if ( !is_guest_l4_slot(i) && !external )
   8.147                      continue;
   8.148 -                guest_root_pgentry_t new_root_e = guest_root[i];
   8.149                  if ( root_entry_has_changed(
   8.150                          new_root_e, snapshot_root[i], PAGE_FLAG_MASK))
   8.151                  {
   8.152 @@ -1749,6 +1756,7 @@ static void sync_all(struct domain *d)
   8.153  {
   8.154      struct out_of_sync_entry *entry;
   8.155      int need_flush = 0;
   8.156 +    l1_pgentry_t *ppte, opte, npte;
   8.157  
   8.158      perfc_incrc(shadow_sync_all);
   8.159  
   8.160 @@ -1764,11 +1772,10 @@ static void sync_all(struct domain *d)
   8.161          if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) )
   8.162              continue;
   8.163  
   8.164 -        l1_pgentry_t *ppte = (l1_pgentry_t *)(
   8.165 +        ppte = (l1_pgentry_t *)(
   8.166              (char *)map_domain_page(entry->writable_pl1e >> PAGE_SHIFT) +
   8.167              (entry->writable_pl1e & ~PAGE_MASK));
   8.168 -        l1_pgentry_t opte = *ppte;
   8.169 -        l1_pgentry_t npte = opte;
   8.170 +        opte = npte = *ppte;
   8.171          l1e_remove_flags(npte, _PAGE_RW);
   8.172  
   8.173          if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
   8.174 @@ -2821,6 +2828,7 @@ static inline unsigned long init_bl2(l4_
   8.175      unsigned int count;
   8.176      unsigned long sl2mfn;
   8.177      struct pfn_info *page;
   8.178 +    void *l2;
   8.179  
   8.180      memset(spl4e, 0, PAGE_SIZE);
   8.181  
   8.182 @@ -2835,7 +2843,7 @@ static inline unsigned long init_bl2(l4_
   8.183      for (count = 0; count < PDP_ENTRIES; count++)
   8.184      {
   8.185          sl2mfn = page_to_pfn(page+count);
   8.186 -        void *l2 = map_domain_page(sl2mfn);
   8.187 +        l2 = map_domain_page(sl2mfn);
   8.188          memset(l2, 0, PAGE_SIZE);
   8.189          unmap_domain_page(l2);
   8.190          spl4e[count] = l4e_from_pfn(sl2mfn, _PAGE_PRESENT);
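
Where hoisting would separate a debug-only local from the check that uses it, the shadow.c hunks above (and the matching shadow32.c ones below) instead open a nested braced block inside #ifndef NDEBUG, since C89 allows declarations at the start of any compound statement. An illustrative sketch, with debug_check and its body invented for the example:

    #include <assert.h>

    void debug_check(unsigned long flags)
    {
        flags &= ~0UL;                        /* some earlier statement */

    #ifndef NDEBUG
        {
            unsigned long low = flags & 0xff; /* declaration at block start: legal */
            assert(low < 0x100);              /* debug-only sanity check */
        }
    #endif
    }
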
     9.1 --- a/xen/arch/x86/shadow32.c	Tue Nov 22 18:21:22 2005 +0100
     9.2 +++ b/xen/arch/x86/shadow32.c	Tue Nov 22 18:44:08 2005 +0100
     9.3 @@ -208,6 +208,7 @@ alloc_shadow_page(struct domain *d,
     9.4      struct pfn_info *page;
     9.5      unsigned long smfn;
     9.6      int pin = 0;
     9.7 +    void *l1;
     9.8  
     9.9      // Currently, we only keep pre-zero'ed pages around for use as L1's...
    9.10      // This will change.  Soon.
    9.11 @@ -224,7 +225,7 @@ alloc_shadow_page(struct domain *d,
    9.12          else
    9.13          {
    9.14              page = alloc_domheap_page(NULL);
    9.15 -            void *l1 = map_domain_page(page_to_pfn(page));
    9.16 +            l1 = map_domain_page(page_to_pfn(page));
    9.17              memset(l1, 0, PAGE_SIZE);
    9.18              unmap_domain_page(l1);
    9.19          }
    9.20 @@ -558,6 +559,7 @@ static void free_shadow_pages(struct dom
    9.21      int                   i;
    9.22      struct shadow_status *x;
    9.23      struct vcpu          *v;
    9.24 +    struct list_head *list_ent, *tmp;
    9.25   
    9.26      /*
    9.27       * WARNING! The shadow page table must not currently be in use!
    9.28 @@ -697,15 +699,14 @@ static void free_shadow_pages(struct dom
    9.29          xfree(mfn_list);
    9.30      }
    9.31  
    9.32 -    // Now free the pre-zero'ed pages from the domain
    9.33 -    //
    9.34 -    struct list_head *list_ent, *tmp;
    9.35 +    /* Now free the pre-zero'ed pages from the domain */
    9.36      list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames)
    9.37      {
    9.38 +        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
    9.39 +
    9.40          list_del(list_ent);
    9.41          perfc_decr(free_l1_pages);
    9.42  
    9.43 -        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
    9.44          free_domheap_page(page);
    9.45      }
    9.46  
    9.47 @@ -1218,6 +1219,11 @@ static void free_out_of_sync_entries(str
    9.48  
    9.49  void __shadow_mode_disable(struct domain *d)
    9.50  {
    9.51 +    struct vcpu *v;
    9.52 +#ifndef NDEBUG
    9.53 +    int i;
    9.54 +#endif
    9.55 +
    9.56      if ( unlikely(!shadow_mode_enabled(d)) )
    9.57          return;
    9.58  
    9.59 @@ -1225,7 +1231,6 @@ void __shadow_mode_disable(struct domain
    9.60      free_writable_pte_predictions(d);
    9.61  
    9.62  #ifndef NDEBUG
    9.63 -    int i;
    9.64      for ( i = 0; i < shadow_ht_buckets; i++ )
    9.65      {
    9.66          if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 )
    9.67 @@ -1242,11 +1247,8 @@ void __shadow_mode_disable(struct domain
    9.68      free_shadow_ht_entries(d);
    9.69      free_out_of_sync_entries(d);
    9.70  
    9.71 -    struct vcpu *v;
    9.72      for_each_vcpu(d, v)
    9.73 -    {
    9.74          update_pagetables(v);
    9.75 -    }
    9.76  }
    9.77  
    9.78  static int shadow_mode_table_op(
    9.79 @@ -1423,14 +1425,18 @@ int shadow_mode_control(struct domain *d
    9.80  unsigned long
    9.81  gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
    9.82  {
    9.83 -    ASSERT( shadow_mode_translate(d) );
    9.84 +    unsigned long va, tabpfn;
    9.85 +    l1_pgentry_t *l1, l1e;
    9.86 +    l2_pgentry_t *l2, l2e;
    9.87 +
    9.88 +    ASSERT(shadow_mode_translate(d));
    9.89  
    9.90      perfc_incrc(gpfn_to_mfn_foreign);
    9.91  
    9.92 -    unsigned long va = gpfn << PAGE_SHIFT;
    9.93 -    unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
    9.94 -    l2_pgentry_t *l2 = map_domain_page(tabpfn);
    9.95 -    l2_pgentry_t l2e = l2[l2_table_offset(va)];
    9.96 +    va = gpfn << PAGE_SHIFT;
    9.97 +    tabpfn = pagetable_get_pfn(d->arch.phys_table);
    9.98 +    l2 = map_domain_page(tabpfn);
    9.99 +    l2e = l2[l2_table_offset(va)];
   9.100      unmap_domain_page(l2);
   9.101      if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
   9.102      {
   9.103 @@ -1438,8 +1444,8 @@ gpfn_to_mfn_foreign(struct domain *d, un
   9.104                 d->domain_id, gpfn, l2e_get_intpte(l2e));
   9.105          return INVALID_MFN;
   9.106      }
   9.107 -    l1_pgentry_t *l1 = map_domain_page(l2e_get_pfn(l2e));
   9.108 -    l1_pgentry_t l1e = l1[l1_table_offset(va)];
   9.109 +    l1 = map_domain_page(l2e_get_pfn(l2e));
   9.110 +    l1e = l1[l1_table_offset(va)];
   9.111      unmap_domain_page(l1);
   9.112  
   9.113  #if 0
   9.114 @@ -1634,9 +1640,11 @@ void shadow_map_l1_into_current_l2(unsig
   9.115      }
   9.116  
   9.117  #ifndef NDEBUG
   9.118 -    l2_pgentry_t old_sl2e;
   9.119 -    __shadow_get_l2e(v, va, &old_sl2e);
   9.120 -    ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
   9.121 +    {
   9.122 +        l2_pgentry_t old_sl2e;
   9.123 +        __shadow_get_l2e(v, va, &old_sl2e);
   9.124 +        ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
   9.125 +    }
   9.126  #endif
   9.127  
   9.128      if ( !get_shadow_ref(sl1mfn) )
   9.129 @@ -1840,14 +1848,16 @@ struct out_of_sync_entry *
   9.130      ASSERT(pfn_valid(mfn));
   9.131  
   9.132  #ifndef NDEBUG
   9.133 -    u32 type = page->u.inuse.type_info & PGT_type_mask;
   9.134 -    if ( shadow_mode_refcounts(d) )
   9.135      {
   9.136 -        ASSERT(type == PGT_writable_page);
   9.137 -    }
   9.138 -    else
   9.139 -    {
   9.140 -        ASSERT(type && (type < PGT_l4_page_table));
   9.141 +        u32 type = page->u.inuse.type_info & PGT_type_mask;
   9.142 +        if ( shadow_mode_refcounts(d) )
   9.143 +        {
   9.144 +            ASSERT(type == PGT_writable_page);
   9.145 +        }
   9.146 +        else
   9.147 +        {
   9.148 +            ASSERT(type && (type < PGT_l4_page_table));
   9.149 +        }
   9.150      }
   9.151  #endif
   9.152  
   9.153 @@ -2329,6 +2339,8 @@ static int resync_all(struct domain *d, 
   9.154      int need_flush = 0, external = shadow_mode_external(d);
   9.155      int unshadow;
   9.156      int changed;
   9.157 +    u32 min_max_shadow, min_max_snapshot;
   9.158 +    int min_shadow, max_shadow, min_snapshot, max_snapshot;
   9.159  
   9.160      ASSERT(shadow_lock_is_acquired(d));
   9.161  
   9.162 @@ -2388,14 +2400,14 @@ static int resync_all(struct domain *d, 
   9.163              if ( !smfn )
   9.164                  break;
   9.165  
   9.166 -            u32 min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
   9.167 -            int min_shadow = SHADOW_MIN(min_max_shadow);
   9.168 -            int max_shadow = SHADOW_MAX(min_max_shadow);
   9.169 -
   9.170 -            u32 min_max_snapshot =
   9.171 +            min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
   9.172 +            min_shadow     = SHADOW_MIN(min_max_shadow);
   9.173 +            max_shadow     = SHADOW_MAX(min_max_shadow);
   9.174 +
   9.175 +            min_max_snapshot =
   9.176                  pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
   9.177 -            int min_snapshot = SHADOW_MIN(min_max_snapshot);
   9.178 -            int max_snapshot = SHADOW_MAX(min_max_snapshot);
   9.179 +            min_snapshot     = SHADOW_MIN(min_max_snapshot);
   9.180 +            max_snapshot     = SHADOW_MAX(min_max_snapshot);
   9.181  
   9.182              changed = 0;
   9.183  
   9.184 @@ -2454,13 +2466,11 @@ static int resync_all(struct domain *d, 
   9.185              changed = 0;
   9.186              for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
   9.187              {
   9.188 -#if CONFIG_X86_PAE
   9.189 -                BUG();  /* FIXME: need type_info */
   9.190 -#endif
   9.191 +                l2_pgentry_t new_pde = guest2[i];
   9.192 +
   9.193                  if ( !is_guest_l2_slot(0,i) && !external )
   9.194                      continue;
   9.195  
   9.196 -                l2_pgentry_t new_pde = guest2[i];
   9.197                  if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK))
   9.198                  {
   9.199                      need_flush |= validate_pde_change(d, new_pde, &shadow2[i]);
   9.200 @@ -2500,13 +2510,11 @@ static int resync_all(struct domain *d, 
   9.201              changed = 0;
   9.202              for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
   9.203              {
   9.204 -#if CONFIG_X86_PAE
   9.205 -                BUG();  /* FIXME: need type_info */
   9.206 -#endif
   9.207 +                l2_pgentry_t new_pde = guest2[i];
   9.208 +
   9.209                  if ( !is_guest_l2_slot(0, i) && !external )
   9.210                      continue;
   9.211  
   9.212 -                l2_pgentry_t new_pde = guest2[i];
   9.213                  if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK) )
   9.214                  {
   9.215                      need_flush |= validate_hl2e_change(d, new_pde, &shadow2[i]);
   9.216 @@ -2554,6 +2562,7 @@ void __shadow_sync_all(struct domain *d)
   9.217  {
   9.218      struct out_of_sync_entry *entry;
   9.219      int need_flush = 0;
   9.220 +    l1_pgentry_t *ppte, opte, npte;
   9.221  
   9.222      perfc_incrc(shadow_sync_all);
   9.223  
   9.224 @@ -2569,11 +2578,10 @@ void __shadow_sync_all(struct domain *d)
   9.225          if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) )
   9.226              continue;
   9.227  
   9.228 -        l1_pgentry_t *ppte = (l1_pgentry_t *)(
   9.229 +        ppte = (l1_pgentry_t *)(
   9.230              (char *)map_domain_page(entry->writable_pl1e >> PAGE_SHIFT) +
   9.231              (entry->writable_pl1e & ~PAGE_MASK));
   9.232 -        l1_pgentry_t opte = *ppte;
   9.233 -        l1_pgentry_t npte = opte;
   9.234 +        opte = npte = *ppte;
   9.235          l1e_remove_flags(npte, _PAGE_RW);
   9.236  
   9.237          if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
    10.1 --- a/xen/arch/x86/shadow_public.c	Tue Nov 22 18:21:22 2005 +0100
    10.2 +++ b/xen/arch/x86/shadow_public.c	Tue Nov 22 18:44:08 2005 +0100
    10.3 @@ -786,6 +786,7 @@ void free_shadow_pages(struct domain *d)
    10.4      int                   i;
    10.5      struct shadow_status *x;
    10.6      struct vcpu          *v;
    10.7 +    struct list_head *list_ent, *tmp;
    10.8  
    10.9      /*
   10.10       * WARNING! The shadow page table must not currently be in use!
   10.11 @@ -884,15 +885,14 @@ void free_shadow_pages(struct domain *d)
   10.12          xfree(mfn_list);
   10.13      }
   10.14  
   10.15 -    // Now free the pre-zero'ed pages from the domain
   10.16 -    //
   10.17 -    struct list_head *list_ent, *tmp;
   10.18 +    /* Now free the pre-zero'ed pages from the domain. */
   10.19      list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames)
   10.20      {
   10.21 +        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
   10.22 +
   10.23          list_del(list_ent);
   10.24          perfc_decr(free_l1_pages);
   10.25  
   10.26 -        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
   10.27          if (d->arch.ops->guest_paging_levels == PAGING_L2)
   10.28          {
   10.29  #if CONFIG_PAGING_LEVELS >=4
   10.30 @@ -912,6 +912,11 @@ void free_shadow_pages(struct domain *d)
   10.31  
   10.32  void __shadow_mode_disable(struct domain *d)
   10.33  {
   10.34 +    struct vcpu *v;
   10.35 +#ifndef NDEBUG
   10.36 +    int i;
   10.37 +#endif
   10.38 +
   10.39      if ( unlikely(!shadow_mode_enabled(d)) )
   10.40          return;
   10.41  
   10.42 @@ -919,7 +924,6 @@ void __shadow_mode_disable(struct domain
   10.43      free_writable_pte_predictions(d);
   10.44  
   10.45  #ifndef NDEBUG
   10.46 -    int i;
   10.47      for ( i = 0; i < shadow_ht_buckets; i++ )
   10.48      {
   10.49          if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 )
   10.50 @@ -936,11 +940,8 @@ void __shadow_mode_disable(struct domain
   10.51      free_shadow_ht_entries(d);
   10.52      free_out_of_sync_entries(d);
   10.53  
   10.54 -    struct vcpu *v;
   10.55      for_each_vcpu(d, v)
   10.56 -    {
   10.57          update_pagetables(v);
   10.58 -    }
   10.59  }
   10.60  
   10.61  
   10.62 @@ -1608,14 +1609,18 @@ remove_shadow(struct domain *d, unsigned
   10.63  unsigned long
   10.64  gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
   10.65  {
   10.66 -    ASSERT( shadow_mode_translate(d) );
   10.67 +    unsigned long va, tabpfn;
   10.68 +    l1_pgentry_t *l1, l1e;
   10.69 +    l2_pgentry_t *l2, l2e;
   10.70 +
   10.71 +    ASSERT(shadow_mode_translate(d));
   10.72  
   10.73      perfc_incrc(gpfn_to_mfn_foreign);
   10.74  
   10.75 -    unsigned long va = gpfn << PAGE_SHIFT;
   10.76 -    unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
   10.77 -    l2_pgentry_t *l2 = map_domain_page(tabpfn);
   10.78 -    l2_pgentry_t l2e = l2[l2_table_offset(va)];
   10.79 +    va = gpfn << PAGE_SHIFT;
   10.80 +    tabpfn = pagetable_get_pfn(d->arch.phys_table);
   10.81 +    l2 = map_domain_page(tabpfn);
   10.82 +    l2e = l2[l2_table_offset(va)];
   10.83      unmap_domain_page(l2);
   10.84      if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
   10.85      {
   10.86 @@ -1623,8 +1628,8 @@ gpfn_to_mfn_foreign(struct domain *d, un
   10.87                 d->domain_id, gpfn, l2e_get_intpte(l2e));
   10.88          return INVALID_MFN;
   10.89      }
   10.90 -    l1_pgentry_t *l1 = map_domain_page(l2e_get_pfn(l2e));
   10.91 -    l1_pgentry_t l1e = l1[l1_table_offset(va)];
   10.92 +    l1 = map_domain_page(l2e_get_pfn(l2e));
   10.93 +    l1e = l1[l1_table_offset(va)];
   10.94      unmap_domain_page(l1);
   10.95  
   10.96  #if 0
    11.1 --- a/xen/arch/x86/vmx.c	Tue Nov 22 18:21:22 2005 +0100
    11.2 +++ b/xen/arch/x86/vmx.c	Tue Nov 22 18:44:08 2005 +0100
    11.3 @@ -129,15 +129,14 @@ static u32 msr_data_index[VMX_MSR_COUNT]
    11.4   */
    11.5  void vmx_load_msrs(struct vcpu *n)
    11.6  {
    11.7 -    struct msr_state *host_state;
    11.8 -    host_state = &percpu_msr[smp_processor_id()];
    11.9 +    struct msr_state *host_state = &percpu_msr[smp_processor_id()];
   11.10 +    int i;
   11.11  
   11.12      if ( !vmx_switch_on )
   11.13          return;
   11.14  
   11.15 -    while (host_state->flags){
   11.16 -        int i;
   11.17 -
   11.18 +    while ( host_state->flags )
   11.19 +    {
   11.20          i = find_first_set_bit(host_state->flags);
   11.21          wrmsrl(msr_data_index[i], host_state->msr_items[i]);
   11.22          clear_bit(i, &host_state->flags);
   11.23 @@ -146,11 +145,10 @@ void vmx_load_msrs(struct vcpu *n)
   11.24  
   11.25  static void vmx_save_init_msrs(void)
   11.26  {
   11.27 -    struct msr_state *host_state;
   11.28 -    host_state = &percpu_msr[smp_processor_id()];
   11.29 +    struct msr_state *host_state = &percpu_msr[smp_processor_id()];
   11.30      int i;
   11.31  
   11.32 -    for (i = 0; i < VMX_MSR_COUNT; i++)
   11.33 +    for ( i = 0; i < VMX_MSR_COUNT; i++ )
   11.34          rdmsrl(msr_data_index[i], host_state->msr_items[i]);
   11.35  }
   11.36  
   11.37 @@ -516,23 +514,20 @@ static void vmx_vmexit_do_cpuid(unsigned
   11.38  
   11.39      cpuid(input, &eax, &ebx, &ecx, &edx);
   11.40  
   11.41 -    if (input == 1) {
   11.42 +    if ( input == 1 )
   11.43 +    {
   11.44          if ( vmx_apic_support(v->domain) &&
   11.45 -                !vlapic_global_enabled((VLAPIC(v))) )
   11.46 +             !vlapic_global_enabled((VLAPIC(v))) )
   11.47              clear_bit(X86_FEATURE_APIC, &edx);
   11.48 -#ifdef __i386__
   11.49 -        clear_bit(X86_FEATURE_PSE, &edx);
   11.50 -        clear_bit(X86_FEATURE_PAE, &edx);
   11.51 -        clear_bit(X86_FEATURE_PSE36, &edx);
   11.52 -#else
   11.53 -        struct vcpu *v = current;
   11.54 -        if (v->domain->arch.ops->guest_paging_levels == PAGING_L2)
   11.55 +
   11.56 +#ifdef __x86_64__
   11.57 +        if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
   11.58 +#endif
   11.59          {
   11.60              clear_bit(X86_FEATURE_PSE, &edx);
   11.61              clear_bit(X86_FEATURE_PAE, &edx);
   11.62              clear_bit(X86_FEATURE_PSE36, &edx);
   11.63          }
   11.64 -#endif
   11.65  
   11.66          /* Unsupportable for virtualised CPUs. */
   11.67          clear_bit(X86_FEATURE_VMXE & 31, &ecx);
   11.68 @@ -1084,6 +1079,7 @@ static int vmx_set_cr0(unsigned long val
   11.69      unsigned long eip;
   11.70      int paging_enabled;
   11.71      unsigned long vm_entry_value;
   11.72 +
   11.73      /*
   11.74       * CR0: We don't want to lose PE and PG.
   11.75       */
   11.76 @@ -1140,14 +1136,17 @@ static int vmx_set_cr0(unsigned long val
   11.77  #endif
   11.78          }
   11.79  
   11.80 -        unsigned long crn;
   11.81 -        /* update CR4's PAE if needed */
   11.82 -        __vmread(GUEST_CR4, &crn);
   11.83 -        if ( (!(crn & X86_CR4_PAE)) &&
   11.84 -             test_bit(VMX_CPU_STATE_PAE_ENABLED,
   11.85 -                      &v->arch.arch_vmx.cpu_state)){
   11.86 -            VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
   11.87 -            __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
   11.88 +        {
   11.89 +            unsigned long crn;
   11.90 +            /* update CR4's PAE if needed */
   11.91 +            __vmread(GUEST_CR4, &crn);
   11.92 +            if ( (!(crn & X86_CR4_PAE)) &&
   11.93 +                 test_bit(VMX_CPU_STATE_PAE_ENABLED,
   11.94 +                          &v->arch.arch_vmx.cpu_state) )
   11.95 +            {
   11.96 +                VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
   11.97 +                __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
   11.98 +            }
   11.99          }
  11.100  #endif
  11.101          /*
    12.1 --- a/xen/arch/x86/x86_32/traps.c	Tue Nov 22 18:21:22 2005 +0100
    12.2 +++ b/xen/arch/x86/x86_32/traps.c	Tue Nov 22 18:44:08 2005 +0100
    12.3 @@ -167,6 +167,7 @@ asmlinkage void smp_deferred_nmi(struct 
    12.4  
    12.5  void __init percpu_traps_init(void)
    12.6  {
    12.7 +    struct tss_struct *tss = &doublefault_tss;
    12.8      asmlinkage int hypercall(void);
    12.9  
   12.10      if ( smp_processor_id() != 0 )
   12.11 @@ -184,7 +185,6 @@ void __init percpu_traps_init(void)
   12.12       * Make a separate task for double faults. This will get us debug output if
   12.13       * we blow the kernel stack.
   12.14       */
   12.15 -    struct tss_struct *tss = &doublefault_tss;
   12.16      memset(tss, 0, sizeof(*tss));
   12.17      tss->ds     = __HYPERVISOR_DS;
   12.18      tss->es     = __HYPERVISOR_DS;
    13.1 --- a/xen/common/sched_sedf.c	Tue Nov 22 18:21:22 2005 +0100
    13.2 +++ b/xen/common/sched_sedf.c	Tue Nov 22 18:44:08 2005 +0100
    13.3 @@ -704,11 +704,12 @@ static struct task_slice sedf_do_schedul
    13.4      struct list_head     *waitq    = WAITQ(cpu);
    13.5  #if (EXTRA > EXTRA_OFF)
    13.6      struct sedf_vcpu_info *inf     = EDOM_INFO(current);
    13.7 -    struct list_head     *extraq[] = {EXTRAQ(cpu, EXTRA_PEN_Q),
    13.8 -                                      EXTRAQ(cpu, EXTRA_UTIL_Q)};
    13.9 +    struct list_head      *extraq[] = {
   13.10 +        EXTRAQ(cpu, EXTRA_PEN_Q), EXTRAQ(cpu, EXTRA_UTIL_Q)};
   13.11  #endif
   13.12 -    struct task_slice          ret;
   13.13 -    /*int i = 0;*/
   13.14 +    struct sedf_vcpu_info *runinf, *waitinf;
   13.15 +    struct task_slice      ret;
   13.16 +
   13.17      /*idle tasks don't need any of the following stuf*/
   13.18      if (is_idle_task(current->domain))
   13.19          goto check_waitq;
   13.20 @@ -737,7 +738,6 @@ static struct task_slice sedf_do_schedul
   13.21   
   13.22      /*now simply pick the first domain from the runqueue, which has the
   13.23        earliest deadline, because the list is sorted*/
   13.24 -    struct sedf_vcpu_info *runinf, *waitinf;
   13.25   
   13.26      if (!list_empty(runq)) {
   13.27          runinf   = list_entry(runq->next,struct sedf_vcpu_info,list);
    14.1 --- a/xen/include/asm-x86/shadow.h	Tue Nov 22 18:21:22 2005 +0100
    14.2 +++ b/xen/include/asm-x86/shadow.h	Tue Nov 22 18:44:08 2005 +0100
    14.3 @@ -173,11 +173,12 @@ extern void vmx_shadow_clear_state(struc
    14.4  static inline int page_is_page_table(struct pfn_info *page)
    14.5  {
    14.6      struct domain *owner = page_get_owner(page);
    14.7 +    u32 type_info;
    14.8  
    14.9      if ( owner && shadow_mode_refcounts(owner) )
   14.10          return page->count_info & PGC_page_table;
   14.11  
   14.12 -    u32 type_info = page->u.inuse.type_info & PGT_type_mask;
   14.13 +    type_info = page->u.inuse.type_info & PGT_type_mask;
   14.14      return type_info && (type_info <= PGT_l4_page_table);
   14.15  }
   14.16