ia64/xen-unstable

changeset 14775:1bde28f762a6

merge with xen-unstable.hg
author Alex Williamson <alex.williamson@hp.com>
date Mon Apr 09 13:40:25 2007 -0600 (2007-04-09)
parents cd3cfddcb055 400a3dca237e
children 7158623a1b3d 4d4a7ba24c01
files
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/mm/hypervisor.c	Mon Apr 09 13:39:35 2007 -0600
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/mm/hypervisor.c	Mon Apr 09 13:40:25 2007 -0600
     1.3 @@ -353,6 +353,7 @@ int xen_create_contiguous_region(
     1.4  
     1.5  	return success ? 0 : -ENOMEM;
     1.6  }
     1.7 +EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
     1.8  
     1.9  void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
    1.10  {
    1.11 @@ -437,6 +438,7 @@ void xen_destroy_contiguous_region(unsig
    1.12  
    1.13  	balloon_unlock(flags);
    1.14  }
    1.15 +EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
    1.16  
    1.17  #ifdef __i386__
    1.18  int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
     2.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/maddr.h	Mon Apr 09 13:39:35 2007 -0600
     2.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/maddr.h	Mon Apr 09 13:40:25 2007 -0600
     2.3 @@ -153,23 +153,6 @@ static inline paddr_t pte_machine_to_phy
     2.4  }
     2.5  #endif
     2.6  
     2.7 -#else /* !CONFIG_XEN */
     2.8 -
     2.9 -#define pfn_to_mfn(pfn) (pfn)
    2.10 -#define mfn_to_pfn(mfn) (mfn)
    2.11 -#define mfn_to_local_pfn(mfn) (mfn)
    2.12 -#define set_phys_to_machine(pfn, mfn) ((void)0)
    2.13 -#define phys_to_machine_mapping_valid(pfn) (1)
    2.14 -#define phys_to_machine(phys) ((maddr_t)(phys))
    2.15 -#define machine_to_phys(mach) ((paddr_t)(mach))
    2.16 -
    2.17 -#endif /* !CONFIG_XEN */
    2.18 -
    2.19 -/* VIRT <-> MACHINE conversion */
    2.20 -#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
    2.21 -#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
    2.22 -#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
    2.23 -
    2.24  #ifdef CONFIG_X86_PAE
    2.25  static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
    2.26  {
    2.27 @@ -188,4 +171,23 @@ static inline pte_t pfn_pte_ma(unsigned 
    2.28  
    2.29  #define __pte_ma(x)	((pte_t) { (x) } )
    2.30  
    2.31 +#else /* !CONFIG_XEN */
    2.32 +
    2.33 +#define pfn_to_mfn(pfn) (pfn)
    2.34 +#define mfn_to_pfn(mfn) (mfn)
    2.35 +#define mfn_to_local_pfn(mfn) (mfn)
    2.36 +#define set_phys_to_machine(pfn, mfn) ((void)0)
    2.37 +#define phys_to_machine_mapping_valid(pfn) (1)
    2.38 +#define phys_to_machine(phys) ((maddr_t)(phys))
    2.39 +#define machine_to_phys(mach) ((paddr_t)(mach))
    2.40 +#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
    2.41 +#define __pte_ma(x) __pte(x)
    2.42 +
    2.43 +#endif /* !CONFIG_XEN */
    2.44 +
    2.45 +/* VIRT <-> MACHINE conversion */
    2.46 +#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
    2.47 +#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
    2.48 +#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
    2.49 +
    2.50  #endif /* _I386_MADDR_H */
     3.1 --- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/maddr.h	Mon Apr 09 13:39:35 2007 -0600
     3.2 +++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/maddr.h	Mon Apr 09 13:40:25 2007 -0600
     3.3 @@ -135,6 +135,9 @@ static inline paddr_t pte_machine_to_phy
     3.4  	return phys;
     3.5  }
     3.6  
     3.7 +#define __pte_ma(x)     ((pte_t) { (x) } )
     3.8 +#define pfn_pte_ma(pfn, prot)	__pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
     3.9 +
    3.10  #else /* !CONFIG_XEN */
    3.11  
    3.12  #define pfn_to_mfn(pfn) (pfn)
    3.13 @@ -144,6 +147,8 @@ static inline paddr_t pte_machine_to_phy
    3.14  #define phys_to_machine_mapping_valid(pfn) (1)
    3.15  #define phys_to_machine(phys) ((maddr_t)(phys))
    3.16  #define machine_to_phys(mach) ((paddr_t)(mach))
    3.17 +#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
    3.18 +#define __pte_ma(x) __pte(x)
    3.19  
    3.20  #endif /* !CONFIG_XEN */
    3.21  
    3.22 @@ -152,8 +157,5 @@ static inline paddr_t pte_machine_to_phy
    3.23  #define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
    3.24  #define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
    3.25  
    3.26 -#define __pte_ma(x)     ((pte_t) { (x) } )
    3.27 -#define pfn_pte_ma(pfn, prot)	__pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
    3.28 -
    3.29  #endif /* _X86_64_MADDR_H */
    3.30  
     4.1 --- a/tools/ioemu/target-i386-dm/exec-dm.c	Mon Apr 09 13:39:35 2007 -0600
     4.2 +++ b/tools/ioemu/target-i386-dm/exec-dm.c	Mon Apr 09 13:40:25 2007 -0600
     4.3 @@ -128,12 +128,10 @@ char *logfilename = "/tmp/qemu.log";
     4.4  FILE *logfile;
     4.5  int loglevel;
     4.6  
     4.7 -
     4.8  #ifdef MAPCACHE
     4.9  pthread_mutex_t mapcache_mutex;
    4.10  #endif
    4.11  
    4.12 -
    4.13  void cpu_exec_init(CPUState *env)
    4.14  {
    4.15      CPUState **penv;
    4.16 @@ -427,21 +425,10 @@ int iomem_index(target_phys_addr_t addr)
    4.17          return 0;
    4.18  }
    4.19  
    4.20 -static inline int paddr_is_ram(target_phys_addr_t addr)
    4.21 -{
    4.22 -    /* Is this guest physical address RAM-backed? */
    4.23 -#if defined(CONFIG_DM) && (defined(__i386__) || defined(__x86_64__))
    4.24 -    return ((addr < HVM_BELOW_4G_MMIO_START) ||
    4.25 -            (addr >= HVM_BELOW_4G_MMIO_START + HVM_BELOW_4G_MMIO_LENGTH));
    4.26 -#else
    4.27 -    return (addr < ram_size);
    4.28 -#endif
    4.29 -}
    4.30 -
    4.31  #if defined(__i386__) || defined(__x86_64__)
    4.32  #define phys_ram_addr(x) (qemu_map_cache(x))
    4.33  #elif defined(__ia64__)
    4.34 -#define phys_ram_addr(x) (phys_ram_base + (x))
     4.35 +#define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : NULL)
    4.36  #endif
    4.37  
    4.38  extern unsigned long *logdirty_bitmap;
    4.39 @@ -481,16 +468,15 @@ void cpu_physical_memory_rw(target_phys_
    4.40                      io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
    4.41                      l = 1;
    4.42                  }
    4.43 -            } else if (paddr_is_ram(addr)) {
    4.44 +            } else if ((ptr = phys_ram_addr(addr)) != NULL) {
    4.45                  /* Writing to RAM */
    4.46 -                ptr = phys_ram_addr(addr);
    4.47                  memcpy(ptr, buf, l);
    4.48                  if (logdirty_bitmap != NULL) {
    4.49                      /* Record that we have dirtied this frame */
    4.50                      unsigned long pfn = addr >> TARGET_PAGE_BITS;
    4.51                      if (pfn / 8 >= logdirty_bitmap_size) {
    4.52 -                        fprintf(logfile, "dirtying pfn %x >= bitmap size %x\n",
    4.53 -                                pfn, logdirty_bitmap_size * 8);
    4.54 +                        fprintf(logfile, "dirtying pfn %lx >= bitmap "
    4.55 +                                "size %lx\n", pfn, logdirty_bitmap_size * 8);
    4.56                      } else {
    4.57                          logdirty_bitmap[pfn / HOST_LONG_BITS]
    4.58                              |= 1UL << pfn % HOST_LONG_BITS;
    4.59 @@ -518,9 +504,8 @@ void cpu_physical_memory_rw(target_phys_
    4.60                      stb_raw(buf, val);
    4.61                      l = 1;
    4.62                  }
    4.63 -            } else if (paddr_is_ram(addr)) {
    4.64 +            } else if ((ptr = phys_ram_addr(addr)) != NULL) {
    4.65                  /* Reading from RAM */
    4.66 -                ptr = phys_ram_addr(addr);
    4.67                  memcpy(buf, ptr, l);
    4.68              } else {
    4.69                  /* Neither RAM nor known MMIO space */
     5.1 --- a/tools/ioemu/vl.c	Mon Apr 09 13:39:35 2007 -0600
     5.2 +++ b/tools/ioemu/vl.c	Mon Apr 09 13:40:25 2007 -0600
     5.3 @@ -5894,7 +5894,32 @@ void suspend(int sig)
     5.4      suspend_requested = 1;
     5.5  }
     5.6  
     5.7 -#if defined(__i386__) || defined(__x86_64__)
     5.8 +#if defined(MAPCACHE)
     5.9 +
    5.10 +#if defined(__i386__) 
    5.11 +#define MAX_MCACHE_SIZE    0x40000000 /* 1GB max for x86 */
    5.12 +#define MCACHE_BUCKET_SHIFT 16
    5.13 +#elif defined(__x86_64__)
    5.14 +#define MAX_MCACHE_SIZE    0x1000000000 /* 64GB max for x86_64 */
    5.15 +#define MCACHE_BUCKET_SHIFT 20
    5.16 +#endif
    5.17 +
    5.18 +#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
    5.19 +
    5.20 +#define BITS_PER_LONG (sizeof(long)*8)
    5.21 +#define BITS_TO_LONGS(bits) \
    5.22 +    (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
    5.23 +#define DECLARE_BITMAP(name,bits) \
    5.24 +    unsigned long name[BITS_TO_LONGS(bits)]
    5.25 +#define test_bit(bit,map) \
    5.26 +    (!!((map)[(bit)/BITS_PER_LONG] & (1UL << ((bit)%BITS_PER_LONG))))
    5.27 +
    5.28 +struct map_cache {
    5.29 +    unsigned long paddr_index;
    5.30 +    uint8_t      *vaddr_base;
    5.31 +    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>PAGE_SHIFT);
    5.32 +};
    5.33 +
    5.34  static struct map_cache *mapcache_entry;
    5.35  static unsigned long nr_buckets;
    5.36  
    5.37 @@ -5928,6 +5953,44 @@ static int qemu_map_cache_init(void)
    5.38      return 0;
    5.39  }
    5.40  
    5.41 +static void qemu_remap_bucket(struct map_cache *entry,
    5.42 +                              unsigned long address_index)
    5.43 +{
    5.44 +    uint8_t *vaddr_base;
    5.45 +    unsigned long pfns[MCACHE_BUCKET_SIZE >> PAGE_SHIFT];
    5.46 +    unsigned int i, j;
    5.47 +
    5.48 +    if (entry->vaddr_base != NULL) {
    5.49 +        errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
    5.50 +        if (errno) {
    5.51 +            fprintf(logfile, "unmap fails %d\n", errno);
    5.52 +            exit(-1);
    5.53 +        }
    5.54 +    }
    5.55 +
    5.56 +    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i++)
    5.57 +        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-PAGE_SHIFT)) + i;
    5.58 +
    5.59 +    vaddr_base = xc_map_foreign_batch(xc_handle, domid, PROT_READ|PROT_WRITE,
    5.60 +                                      pfns, MCACHE_BUCKET_SIZE >> PAGE_SHIFT);
    5.61 +    if (vaddr_base == NULL) {
    5.62 +        fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
    5.63 +        exit(-1);
    5.64 +    }
    5.65 +
    5.66 +    entry->vaddr_base  = vaddr_base;
    5.67 +    entry->paddr_index = address_index;
    5.68 +
    5.69 +    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i += BITS_PER_LONG) {
    5.70 +        unsigned long word = 0;
    5.71 +        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> PAGE_SHIFT)) ?
    5.72 +            (MCACHE_BUCKET_SIZE >> PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
    5.73 +        while (j > 0)
    5.74 +            word = (word << 1) | !(pfns[i + --j] & 0xF0000000UL);
    5.75 +        entry->valid_mapping[i / BITS_PER_LONG] = word;
    5.76 +    }
    5.77 +}
    5.78 +
    5.79  uint8_t *qemu_map_cache(target_phys_addr_t phys_addr)
    5.80  {
    5.81      struct map_cache *entry;
    5.82 @@ -5939,34 +6002,12 @@ uint8_t *qemu_map_cache(target_phys_addr
    5.83  
    5.84      entry = &mapcache_entry[address_index % nr_buckets];
    5.85  
    5.86 -    if (entry->vaddr_base == NULL || entry->paddr_index != address_index) {
    5.87 -        /* We need to remap a bucket. */
    5.88 -        uint8_t *vaddr_base;
    5.89 -        unsigned long pfns[MCACHE_BUCKET_SIZE >> PAGE_SHIFT];
    5.90 -        unsigned int i;
    5.91 -
    5.92 -        if (entry->vaddr_base != NULL) {
    5.93 -            errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
    5.94 -            if (errno) {
    5.95 -                fprintf(logfile, "unmap fails %d\n", errno);
    5.96 -                exit(-1);
    5.97 -            }
    5.98 -        }
    5.99 -
   5.100 -        for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i++)
   5.101 -            pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-PAGE_SHIFT)) + i;
   5.102 -
   5.103 -        vaddr_base = xc_map_foreign_batch(
   5.104 -            xc_handle, domid, PROT_READ|PROT_WRITE,
   5.105 -            pfns, MCACHE_BUCKET_SIZE >> PAGE_SHIFT);
   5.106 -        if (vaddr_base == NULL) {
   5.107 -            fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
   5.108 -            exit(-1);
   5.109 -        }
   5.110 -
   5.111 -        entry->vaddr_base  = vaddr_base;
   5.112 -        entry->paddr_index = address_index;;
   5.113 -    }
   5.114 +    if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
   5.115 +        !test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
   5.116 +        qemu_remap_bucket(entry, address_index);
   5.117 +
   5.118 +    if (!test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
   5.119 +        return NULL;
   5.120  
   5.121      last_address_index = address_index;
   5.122      last_address_vaddr = entry->vaddr_base;
   5.123 @@ -6001,7 +6042,8 @@ void qemu_invalidate_map_cache(void)
   5.124  
   5.125      mapcache_unlock();
   5.126  }
   5.127 -#endif
   5.128 +
   5.129 +#endif /* defined(MAPCACHE) */
   5.130  
   5.131  int main(int argc, char **argv)
   5.132  {
     6.1 --- a/tools/ioemu/vl.h	Mon Apr 09 13:39:35 2007 -0600
     6.2 +++ b/tools/ioemu/vl.h	Mon Apr 09 13:40:25 2007 -0600
     6.3 @@ -161,21 +161,6 @@ extern FILE *logfile;
     6.4  
     6.5  #define MAPCACHE
     6.6  
     6.7 -#if defined(__i386__) 
     6.8 -#define MAX_MCACHE_SIZE    0x40000000 /* 1GB max for x86 */
     6.9 -#define MCACHE_BUCKET_SHIFT 16
    6.10 -#elif defined(__x86_64__)
    6.11 -#define MAX_MCACHE_SIZE    0x1000000000 /* 64GB max for x86_64 */
    6.12 -#define MCACHE_BUCKET_SHIFT 20
    6.13 -#endif
    6.14 -
    6.15 -#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
    6.16 -
    6.17 -struct map_cache {
    6.18 -    unsigned long paddr_index;
    6.19 -    uint8_t      *vaddr_base;
    6.20 -};
    6.21 -
    6.22  uint8_t *qemu_map_cache(target_phys_addr_t phys_addr);
    6.23  void     qemu_invalidate_map_cache(void);
    6.24  
     7.1 --- a/tools/libxc/xc_domain_restore.c	Mon Apr 09 13:39:35 2007 -0600
     7.2 +++ b/tools/libxc/xc_domain_restore.c	Mon Apr 09 13:40:25 2007 -0600
     7.3 @@ -62,17 +62,17 @@ read_exact(int fd, void *buf, size_t cou
     7.4      int r = 0, s;
     7.5      unsigned char *b = buf;
     7.6  
     7.7 -    while (r < count) {
     7.8 +    while ( r < count )
     7.9 +    {
    7.10          s = read(fd, &b[r], count - r);
    7.11 -        if ((s == -1) && (errno == EINTR))
    7.12 +        if ( (s == -1) && (errno == EINTR) )
    7.13              continue;
    7.14 -        if (s <= 0) {
    7.15 +        if ( s <= 0 )
    7.16              break;
    7.17 -        }
    7.18          r += s;
    7.19      }
    7.20  
    7.21 -    return (r == count) ? 1 : 0;
    7.22 +    return (r == count);
    7.23  }
    7.24  
    7.25  /*
    7.26 @@ -93,20 +93,21 @@ static int uncanonicalize_pagetable(int 
    7.27      pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
    7.28  
    7.29      /* First pass: work out how many (if any) MFNs we need to alloc */
    7.30 -    for(i = 0; i < pte_last; i++) {
    7.31 -        
    7.32 -        if(pt_levels == 2)
    7.33 +    for ( i = 0; i < pte_last; i++ )
    7.34 +    {
    7.35 +        if ( pt_levels == 2 )
    7.36              pte = ((uint32_t *)page)[i];
    7.37          else
    7.38              pte = ((uint64_t *)page)[i];
    7.39 -        
    7.40 +
    7.41          /* XXX SMH: below needs fixing for PROT_NONE etc */
    7.42 -        if(!(pte & _PAGE_PRESENT))
    7.43 -            continue; 
    7.44 +        if ( !(pte & _PAGE_PRESENT) )
    7.45 +            continue;
    7.46          
    7.47          pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;
    7.48          
    7.49 -        if(pfn >= p2m_size) {
    7.50 +        if ( pfn >= p2m_size )
    7.51 +        {
    7.52              /* This "page table page" is probably not one; bail. */
    7.53              ERROR("Frame number in type %lu page table is out of range: "
    7.54                    "i=%d pfn=0x%lx p2m_size=%lu",
    7.55 @@ -114,16 +115,18 @@ static int uncanonicalize_pagetable(int 
    7.56              return 0;
    7.57          }
    7.58          
    7.59 -        if(p2m[pfn] == INVALID_P2M_ENTRY) {
    7.60 +        if ( p2m[pfn] == INVALID_P2M_ENTRY )
    7.61 +        {
    7.62              /* Have a 'valid' PFN without a matching MFN - need to alloc */
    7.63              p2m_batch[nr_mfns++] = pfn; 
    7.64          }
    7.65      }
    7.66 -    
    7.67 -    
    7.68 -    /* Allocate the requistite number of mfns */
    7.69 -    if (nr_mfns && xc_domain_memory_populate_physmap(
    7.70 -            xc_handle, dom, nr_mfns, 0, 0, p2m_batch) != 0) { 
    7.71 +
    7.72 +    /* Allocate the requisite number of mfns. */
    7.73 +    if ( nr_mfns &&
    7.74 +         (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0, 0,
    7.75 +                                            p2m_batch) != 0) )
    7.76 +    { 
    7.77          ERROR("Failed to allocate memory for batch.!\n"); 
    7.78          errno = ENOMEM;
    7.79          return 0; 
    7.80 @@ -131,26 +134,26 @@ static int uncanonicalize_pagetable(int 
    7.81      
    7.82      /* Second pass: uncanonicalize each present PTE */
    7.83      nr_mfns = 0;
    7.84 -    for(i = 0; i < pte_last; i++) {
    7.85 -
    7.86 -        if(pt_levels == 2)
    7.87 +    for ( i = 0; i < pte_last; i++ )
    7.88 +    {
    7.89 +        if ( pt_levels == 2 )
    7.90              pte = ((uint32_t *)page)[i];
    7.91          else
    7.92              pte = ((uint64_t *)page)[i];
    7.93          
    7.94          /* XXX SMH: below needs fixing for PROT_NONE etc */
    7.95 -        if(!(pte & _PAGE_PRESENT))
    7.96 +        if ( !(pte & _PAGE_PRESENT) )
    7.97              continue;
    7.98          
    7.99          pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;
   7.100          
   7.101 -        if(p2m[pfn] == INVALID_P2M_ENTRY)
   7.102 +        if ( p2m[pfn] == INVALID_P2M_ENTRY )
   7.103              p2m[pfn] = p2m_batch[nr_mfns++];
   7.104  
   7.105          pte &= ~MADDR_MASK_X86;
   7.106          pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;
   7.107  
   7.108 -        if(pt_levels == 2)
   7.109 +        if ( pt_levels == 2 )
   7.110              ((uint32_t *)page)[i] = (uint32_t)pte;
   7.111          else
   7.112              ((uint64_t *)page)[i] = (uint64_t)pte;
   7.113 @@ -161,62 +164,72 @@ static int uncanonicalize_pagetable(int 
   7.114  
   7.115  
   7.116  /* Load the p2m frame list, plus potential extended info chunk */
   7.117 -static xen_pfn_t * load_p2m_frame_list(int io_fd, int *pae_extended_cr3)
   7.118 +static xen_pfn_t *load_p2m_frame_list(int io_fd, int *pae_extended_cr3)
   7.119  {
   7.120      xen_pfn_t *p2m_frame_list;
   7.121      vcpu_guest_context_t ctxt;
   7.122  
   7.123 -    if (!(p2m_frame_list = malloc(P2M_FL_SIZE))) {
   7.124 +    if ( (p2m_frame_list = malloc(P2M_FL_SIZE)) == NULL )
   7.125 +    {
   7.126          ERROR("Couldn't allocate p2m_frame_list array");
   7.127          return NULL;
   7.128      }
   7.129      
   7.130      /* Read first entry of P2M list, or extended-info signature (~0UL). */
   7.131 -    if (!read_exact(io_fd, p2m_frame_list, sizeof(long))) {
   7.132 -            ERROR("read extended-info signature failed");
   7.133 -            return NULL;
   7.134 -        }
   7.135 +    if ( !read_exact(io_fd, p2m_frame_list, sizeof(long)) )
   7.136 +    {
   7.137 +        ERROR("read extended-info signature failed");
   7.138 +        return NULL;
   7.139 +    }
   7.140      
   7.141 -    if (p2m_frame_list[0] == ~0UL) {
   7.142 +    if ( p2m_frame_list[0] == ~0UL )
   7.143 +    {
   7.144          uint32_t tot_bytes;
   7.145          
   7.146          /* Next 4 bytes: total size of following extended info. */
   7.147 -        if (!read_exact(io_fd, &tot_bytes, sizeof(tot_bytes))) {
   7.148 +        if ( !read_exact(io_fd, &tot_bytes, sizeof(tot_bytes)) )
   7.149 +        {
   7.150              ERROR("read extended-info size failed");
   7.151              return NULL;
   7.152          }
   7.153          
   7.154 -        while (tot_bytes) {
   7.155 +        while ( tot_bytes )
   7.156 +        {
   7.157              uint32_t chunk_bytes;
   7.158              char     chunk_sig[4];
   7.159              
   7.160              /* 4-character chunk signature + 4-byte remaining chunk size. */
   7.161 -            if (!read_exact(io_fd, chunk_sig, sizeof(chunk_sig)) ||
   7.162 -                !read_exact(io_fd, &chunk_bytes, sizeof(chunk_bytes))) {
   7.163 +            if ( !read_exact(io_fd, chunk_sig, sizeof(chunk_sig)) ||
   7.164 +                 !read_exact(io_fd, &chunk_bytes, sizeof(chunk_bytes)) )
   7.165 +            {
   7.166                  ERROR("read extended-info chunk signature failed");
   7.167                  return NULL;
   7.168              }
   7.169              tot_bytes -= 8;
   7.170 -            
   7.171 +
   7.172              /* VCPU context structure? */
   7.173 -            if (!strncmp(chunk_sig, "vcpu", 4)) {
   7.174 -                if (!read_exact(io_fd, &ctxt, sizeof(ctxt))) {
   7.175 +            if ( !strncmp(chunk_sig, "vcpu", 4) )
   7.176 +            {
   7.177 +                if ( !read_exact(io_fd, &ctxt, sizeof(ctxt)) )
   7.178 +                {
   7.179                      ERROR("read extended-info vcpu context failed");
   7.180                      return NULL;
   7.181                  }
   7.182                  tot_bytes   -= sizeof(struct vcpu_guest_context);
   7.183                  chunk_bytes -= sizeof(struct vcpu_guest_context);
   7.184                  
   7.185 -                if (ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3))
   7.186 +                if ( ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3) )
   7.187                      *pae_extended_cr3 = 1;
   7.188              }
   7.189              
   7.190              /* Any remaining bytes of this chunk: read and discard. */
   7.191 -            while (chunk_bytes) {
   7.192 +            while ( chunk_bytes )
   7.193 +            {
   7.194                  unsigned long sz = chunk_bytes;
   7.195                  if ( sz > P2M_FL_SIZE )
   7.196                      sz = P2M_FL_SIZE;
   7.197 -                if (!read_exact(io_fd, p2m_frame_list, sz)) {
   7.198 +                if ( !read_exact(io_fd, p2m_frame_list, sz) )
   7.199 +                {
   7.200                      ERROR("read-and-discard extended-info chunk bytes failed");
   7.201                      return NULL;
   7.202                  }
   7.203 @@ -224,25 +237,25 @@ static xen_pfn_t * load_p2m_frame_list(i
   7.204                  tot_bytes   -= sz;
   7.205              }
   7.206          }
   7.207 -        
   7.208 +
   7.209          /* Now read the real first entry of P2M list. */
   7.210 -        if (!read_exact(io_fd, p2m_frame_list, sizeof(long))) {
   7.211 +        if ( !read_exact(io_fd, p2m_frame_list, sizeof(long)) )
   7.212 +        {
   7.213              ERROR("read first entry of p2m_frame_list failed");
   7.214              return NULL;
   7.215          }
   7.216      }
   7.217 -    
   7.218 +
   7.219      /* First entry is already read into the p2m array. */
   7.220 -    if (!read_exact(io_fd, &p2m_frame_list[1], P2M_FL_SIZE - sizeof(long))) {
   7.221 -            ERROR("read p2m_frame_list failed");
   7.222 -            return NULL;
   7.223 +    if ( !read_exact(io_fd, &p2m_frame_list[1], P2M_FL_SIZE - sizeof(long)) )
   7.224 +    {
   7.225 +        ERROR("read p2m_frame_list failed");
   7.226 +        return NULL;
   7.227      }
   7.228      
   7.229      return p2m_frame_list;
   7.230  }
   7.231  
   7.232 -
   7.233 -
   7.234  int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
   7.235                        unsigned int store_evtchn, unsigned long *store_mfn,
   7.236                        unsigned int console_evtchn, unsigned long *console_mfn,
   7.237 @@ -284,7 +297,7 @@ int xc_domain_restore(int xc_handle, int
   7.238      /* Our mapping of the current region (batch) */
   7.239      char *region_base;
   7.240  
   7.241 -    xc_mmu_t *mmu = NULL;
   7.242 +    struct xc_mmu *mmu = NULL;
   7.243  
   7.244      /* used by debug verify code */
   7.245      unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
   7.246 @@ -323,20 +336,23 @@ int xc_domain_restore(int xc_handle, int
   7.247          domctl.cmd    = XEN_DOMCTL_set_address_size;
   7.248          domctl.u.address_size.size = sizeof(unsigned long) * 8;
   7.249          rc = do_domctl(xc_handle, &domctl);
   7.250 -        if ( rc != 0 ) {
   7.251 +        if ( rc != 0 )
   7.252 +        {
   7.253              ERROR("Unable to set guest address size.");
   7.254              goto out;
   7.255          }
   7.256          rc = 1;
   7.257      }
   7.258  
   7.259 -    if(!get_platform_info(xc_handle, dom,
   7.260 -                          &max_mfn, &hvirt_start, &pt_levels)) {
   7.261 +    if ( !get_platform_info(xc_handle, dom,
   7.262 +                            &max_mfn, &hvirt_start, &pt_levels) )
   7.263 +    {
   7.264          ERROR("Unable to get platform info.");
   7.265          return 1;
   7.266      }
   7.267  
   7.268 -    if (lock_pages(&ctxt, sizeof(ctxt))) {
   7.269 +    if ( lock_pages(&ctxt, sizeof(ctxt)) )
   7.270 +    {
   7.271          /* needed for build domctl, but might as well do early */
   7.272          ERROR("Unable to lock ctxt");
   7.273          return 1;
   7.274 @@ -356,19 +372,22 @@ int xc_domain_restore(int xc_handle, int
   7.275      region_mfn = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
   7.276      p2m_batch  = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
   7.277  
   7.278 -    if ((p2m == NULL) || (pfn_type == NULL) ||
   7.279 -        (region_mfn == NULL) || (p2m_batch == NULL)) {
   7.280 +    if ( (p2m == NULL) || (pfn_type == NULL) ||
   7.281 +         (region_mfn == NULL) || (p2m_batch == NULL) )
   7.282 +    {
   7.283          ERROR("memory alloc failed");
   7.284          errno = ENOMEM;
   7.285          goto out;
   7.286      }
   7.287  
   7.288 -    if (lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) {
   7.289 +    if ( lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
   7.290 +    {
   7.291          ERROR("Could not lock region_mfn");
   7.292          goto out;
   7.293      }
   7.294  
   7.295 -    if (lock_pages(p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) {
   7.296 +    if ( lock_pages(p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
   7.297 +    {
   7.298          ERROR("Could not lock p2m_batch");
   7.299          goto out;
   7.300      }
   7.301 @@ -376,7 +395,8 @@ int xc_domain_restore(int xc_handle, int
   7.302      /* Get the domain's shared-info frame. */
   7.303      domctl.cmd = XEN_DOMCTL_getdomaininfo;
   7.304      domctl.domain = (domid_t)dom;
   7.305 -    if (xc_domctl(xc_handle, &domctl) < 0) {
   7.306 +    if ( xc_domctl(xc_handle, &domctl) < 0 )
   7.307 +    {
   7.308          ERROR("Could not get information on new domain");
   7.309          goto out;
   7.310      }
   7.311 @@ -386,7 +406,9 @@ int xc_domain_restore(int xc_handle, int
   7.312      for ( pfn = 0; pfn < p2m_size; pfn++ )
   7.313          p2m[pfn] = INVALID_P2M_ENTRY;
   7.314  
   7.315 -    if(!(mmu = xc_init_mmu_updates(xc_handle, dom))) {
   7.316 +    mmu = xc_alloc_mmu_updates(xc_handle, dom);
   7.317 +    if ( mmu == NULL )
   7.318 +    {
   7.319          ERROR("Could not initialise for MMU updates");
   7.320          goto out;
   7.321      }
   7.322 @@ -400,8 +422,8 @@ int xc_domain_restore(int xc_handle, int
   7.323      prev_pc = 0;
   7.324  
   7.325      n = m = 0;
   7.326 -    while (1) {
   7.327 -
   7.328 +    for ( ; ; )
   7.329 +    {
   7.330          int j, nr_mfns = 0; 
   7.331  
   7.332          this_pc = (n * 100) / p2m_size;
   7.333 @@ -411,39 +433,45 @@ int xc_domain_restore(int xc_handle, int
   7.334              prev_pc = this_pc;
   7.335          }
   7.336  
   7.337 -        if (!read_exact(io_fd, &j, sizeof(int))) {
   7.338 +        if ( !read_exact(io_fd, &j, sizeof(int)) )
   7.339 +        {
   7.340              ERROR("Error when reading batch size");
   7.341              goto out;
   7.342          }
   7.343  
   7.344          PPRINTF("batch %d\n",j);
   7.345  
   7.346 -        if (j == -1) {
   7.347 +        if ( j == -1 )
   7.348 +        {
   7.349              verify = 1;
   7.350              DPRINTF("Entering page verify mode\n");
   7.351              continue;
   7.352          }
   7.353  
   7.354 -        if (j == -2) {
   7.355 +        if ( j == -2 )
   7.356 +        {
   7.357              new_ctxt_format = 1;
   7.358 -            if (!read_exact(io_fd, &max_vcpu_id, sizeof(int)) ||
   7.359 -                (max_vcpu_id >= 64) ||
   7.360 -                !read_exact(io_fd, &vcpumap, sizeof(uint64_t))) {
   7.361 +            if ( !read_exact(io_fd, &max_vcpu_id, sizeof(int)) ||
   7.362 +                 (max_vcpu_id >= 64) ||
   7.363 +                 !read_exact(io_fd, &vcpumap, sizeof(uint64_t)) )
   7.364 +            {
   7.365                  ERROR("Error when reading max_vcpu_id");
   7.366                  goto out;
   7.367              }
   7.368              continue;
   7.369          }
   7.370  
   7.371 -        if (j == 0)
   7.372 +        if ( j == 0 )
   7.373              break;  /* our work here is done */
   7.374  
   7.375 -        if (j > MAX_BATCH_SIZE) {
   7.376 +        if ( j > MAX_BATCH_SIZE )
   7.377 +        {
   7.378              ERROR("Max batch size exceeded. Giving up.");
   7.379              goto out;
   7.380          }
   7.381  
   7.382 -        if (!read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long))) {
   7.383 +        if ( !read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long)) )
   7.384 +        {
   7.385              ERROR("Error when reading region pfn types");
   7.386              goto out;
   7.387          }
   7.388 @@ -464,10 +492,11 @@ int xc_domain_restore(int xc_handle, int
   7.389              }
   7.390          } 
   7.391  
   7.392 -
   7.393          /* Now allocate a bunch of mfns for this batch */
   7.394 -        if (nr_mfns && xc_domain_memory_populate_physmap(
   7.395 -                xc_handle, dom, nr_mfns, 0, 0, p2m_batch) != 0) { 
   7.396 +        if ( nr_mfns &&
   7.397 +             (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0,
   7.398 +                                                0, p2m_batch) != 0) )
   7.399 +        { 
   7.400              ERROR("Failed to allocate memory for batch.!\n"); 
   7.401              errno = ENOMEM;
   7.402              goto out;
   7.403 @@ -481,11 +510,12 @@ int xc_domain_restore(int xc_handle, int
   7.404              pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
   7.405              pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
   7.406  
   7.407 -            if ( pagetype == XEN_DOMCTL_PFINFO_XTAB)
   7.408 +            if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
   7.409                  region_mfn[i] = ~0UL; /* map will fail but we don't care */
   7.410              else 
   7.411              {
   7.412 -                if (p2m[pfn] == INVALID_P2M_ENTRY) {
   7.413 +                if ( p2m[pfn] == INVALID_P2M_ENTRY )
   7.414 +                {
   7.415                      /* We just allocated a new mfn above; update p2m */
   7.416                      p2m[pfn] = p2m_batch[nr_mfns++]; 
   7.417                      nr_pfns++; 
   7.418 @@ -532,7 +562,8 @@ int xc_domain_restore(int xc_handle, int
   7.419              /* In verify mode, we use a copy; otherwise we work in place */
   7.420              page = verify ? (void *)buf : (region_base + i*PAGE_SIZE);
   7.421  
   7.422 -            if (!read_exact(io_fd, page, PAGE_SIZE)) {
   7.423 +            if ( !read_exact(io_fd, page, PAGE_SIZE) )
   7.424 +            {
   7.425                  ERROR("Error when reading page (type was %lx)", pagetype);
   7.426                  goto out;
   7.427              }
   7.428 @@ -577,13 +608,11 @@ int xc_domain_restore(int xc_handle, int
   7.429  
   7.430              }
   7.431  
   7.432 -
   7.433 -            if (verify) {
   7.434 -
   7.435 +            if ( verify )
   7.436 +            {
   7.437                  int res = memcmp(buf, (region_base + i*PAGE_SIZE), PAGE_SIZE);
   7.438 -
   7.439 -                if (res) {
   7.440 -
   7.441 +                if ( res )
   7.442 +                {
   7.443                      int v;
   7.444  
   7.445                      DPRINTF("************** pfn=%lx type=%lx gotcs=%08lx "
   7.446 @@ -591,20 +620,21 @@ int xc_domain_restore(int xc_handle, int
   7.447                              csum_page(region_base + i*PAGE_SIZE),
   7.448                              csum_page(buf));
   7.449  
   7.450 -                    for (v = 0; v < 4; v++) {
   7.451 -
   7.452 +                    for ( v = 0; v < 4; v++ )
   7.453 +                    {
   7.454                          unsigned long *p = (unsigned long *)
   7.455                              (region_base + i*PAGE_SIZE);
   7.456 -                        if (buf[v] != p[v])
   7.457 +                        if ( buf[v] != p[v] )
   7.458                              DPRINTF("    %d: %08lx %08lx\n", v, buf[v], p[v]);
   7.459                      }
   7.460                  }
   7.461              }
   7.462  
   7.463 -            if (!hvm 
   7.464 -                && xc_add_mmu_update(xc_handle, mmu,
   7.465 -                                     (((unsigned long long)mfn) << PAGE_SHIFT)
   7.466 -                                     | MMU_MACHPHYS_UPDATE, pfn)) {
   7.467 +            if ( !hvm &&
   7.468 +                 xc_add_mmu_update(xc_handle, mmu,
   7.469 +                                   (((unsigned long long)mfn) << PAGE_SHIFT)
   7.470 +                                   | MMU_MACHPHYS_UPDATE, pfn) )
   7.471 +            {
   7.472                  ERROR("failed machpys update mfn=%lx pfn=%lx", mfn, pfn);
   7.473                  goto out;
   7.474              }
   7.475 @@ -629,8 +659,9 @@ int xc_domain_restore(int xc_handle, int
   7.476       * Ensure we flush all machphys updates before potential PAE-specific
   7.477       * reallocations below.
   7.478       */
   7.479 -    if (!hvm && xc_finish_mmu_updates(xc_handle, mmu)) {
   7.480 -        ERROR("Error doing finish_mmu_updates()");
   7.481 +    if ( !hvm && xc_flush_mmu_updates(xc_handle, mmu) )
   7.482 +    {
    7.483 +        ERROR("Error doing xc_flush_mmu_updates()");
   7.484          goto out;
   7.485      }
   7.486  
   7.487 @@ -664,9 +695,9 @@ int xc_domain_restore(int xc_handle, int
   7.488          *store_mfn = magic_pfns[2];
   7.489  
   7.490          /* Read vcpu contexts */
   7.491 -        for (i = 0; i <= max_vcpu_id; i++) 
   7.492 +        for ( i = 0; i <= max_vcpu_id; i++ )
   7.493          {
   7.494 -            if (!(vcpumap & (1ULL << i)))
   7.495 +            if ( !(vcpumap & (1ULL << i)) )
   7.496                  continue;
   7.497  
   7.498              if ( !read_exact(io_fd, &(ctxt), sizeof(ctxt)) )
   7.499 @@ -713,8 +744,8 @@ int xc_domain_restore(int xc_handle, int
   7.500  
   7.501      /* Non-HVM guests only from here on */
   7.502  
   7.503 -    if ((pt_levels == 3) && !pae_extended_cr3) {
   7.504 -
   7.505 +    if ( (pt_levels == 3) && !pae_extended_cr3 )
   7.506 +    {
   7.507          /*
   7.508          ** XXX SMH on PAE we need to ensure PGDs are in MFNs < 4G. This
   7.509          ** is a little awkward and involves (a) finding all such PGDs and
   7.510 @@ -744,21 +775,24 @@ int xc_domain_restore(int xc_handle, int
   7.511                      xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   7.512                                           PROT_READ, p2m[i]);
   7.513  
   7.514 -                for(j = 0; j < 4; j++)
   7.515 +                for ( j = 0; j < 4; j++ )
   7.516                      l3ptes[j] = l3tab[j];
   7.517  
   7.518                  munmap(l3tab, PAGE_SIZE);
   7.519  
   7.520 -                if (!(new_mfn=xc_make_page_below_4G(xc_handle, dom, p2m[i]))) {
   7.521 +                new_mfn = xc_make_page_below_4G(xc_handle, dom, p2m[i]);
   7.522 +                if ( !new_mfn )
   7.523 +                {
   7.524                      ERROR("Couldn't get a page below 4GB :-(");
   7.525                      goto out;
   7.526                  }
   7.527  
   7.528                  p2m[i] = new_mfn;
   7.529 -                if (xc_add_mmu_update(xc_handle, mmu,
   7.530 -                                      (((unsigned long long)new_mfn)
   7.531 -                                       << PAGE_SHIFT) |
   7.532 -                                      MMU_MACHPHYS_UPDATE, i)) {
   7.533 +                if ( xc_add_mmu_update(xc_handle, mmu,
   7.534 +                                       (((unsigned long long)new_mfn)
   7.535 +                                        << PAGE_SHIFT) |
   7.536 +                                       MMU_MACHPHYS_UPDATE, i) )
   7.537 +                {
   7.538                      ERROR("Couldn't m2p on PAE root pgdir");
   7.539                      goto out;
   7.540                  }
   7.541 @@ -767,11 +801,10 @@ int xc_domain_restore(int xc_handle, int
   7.542                      xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   7.543                                           PROT_READ | PROT_WRITE, p2m[i]);
   7.544  
   7.545 -                for(j = 0; j < 4; j++)
   7.546 +                for ( j = 0; j < 4; j++ )
   7.547                      l3tab[j] = l3ptes[j];
   7.548  
   7.549                  munmap(l3tab, PAGE_SIZE);
   7.550 -
   7.551              }
   7.552          }
   7.553  
   7.554 @@ -787,19 +820,22 @@ int xc_domain_restore(int xc_handle, int
   7.555                  j++;
   7.556              }
   7.557  
   7.558 -            if(i == (p2m_size-1) || j == MAX_BATCH_SIZE) {
   7.559 -
   7.560 -                if (!(region_base = xc_map_foreign_batch(
   7.561 -                          xc_handle, dom, PROT_READ | PROT_WRITE,
   7.562 -                          region_mfn, j))) {
   7.563 +            if ( (i == (p2m_size-1)) || (j == MAX_BATCH_SIZE) )
   7.564 +            {
   7.565 +                region_base = xc_map_foreign_batch(
   7.566 +                    xc_handle, dom, PROT_READ | PROT_WRITE, region_mfn, j);
   7.567 +                if ( region_base == NULL )
   7.568 +                {
   7.569                      ERROR("map batch failed");
   7.570                      goto out;
   7.571                  }
   7.572  
   7.573 -                for(k = 0; k < j; k++) {
   7.574 -                    if(!uncanonicalize_pagetable(xc_handle, dom, 
   7.575 -                                                 XEN_DOMCTL_PFINFO_L1TAB,
   7.576 -                                                 region_base + k*PAGE_SIZE)) {
   7.577 +                for ( k = 0; k < j; k++ )
   7.578 +                {
   7.579 +                    if ( !uncanonicalize_pagetable(
   7.580 +                        xc_handle, dom, XEN_DOMCTL_PFINFO_L1TAB,
   7.581 +                        region_base + k*PAGE_SIZE) )
   7.582 +                    {
   7.583                          ERROR("failed uncanonicalize pt!");
   7.584                          goto out;
   7.585                      }
   7.586 @@ -810,8 +846,9 @@ int xc_domain_restore(int xc_handle, int
   7.587              }
   7.588          }
   7.589  
   7.590 -        if (xc_finish_mmu_updates(xc_handle, mmu)) {
   7.591 -            ERROR("Error doing finish_mmu_updates()");
   7.592 +        if ( xc_flush_mmu_updates(xc_handle, mmu) )
   7.593 +        {
   7.594 +            ERROR("Error doing xc_flush_mmu_updates()");
   7.595              goto out;
   7.596          }
   7.597      }
   7.598 @@ -852,8 +889,10 @@ int xc_domain_restore(int xc_handle, int
   7.599          nr_pins++;
   7.600  
   7.601          /* Batch full? Then flush. */
   7.602 -        if (nr_pins == MAX_PIN_BATCH) {
   7.603 -            if (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) {
   7.604 +        if ( nr_pins == MAX_PIN_BATCH )
   7.605 +        {
   7.606 +            if ( xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0 )
   7.607 +            {
   7.608                  ERROR("Failed to pin batch of %d page tables", nr_pins);
   7.609                  goto out;
   7.610              }
   7.611 @@ -862,7 +901,8 @@ int xc_domain_restore(int xc_handle, int
   7.612      }
   7.613  
   7.614      /* Flush final partial batch. */
   7.615 -    if ((nr_pins != 0) && (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0)) {
   7.616 +    if ( (nr_pins != 0) && (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) )
   7.617 +    {
   7.618          ERROR("Failed to pin batch of %d page tables", nr_pins);
   7.619          goto out;
   7.620      }
   7.621 @@ -876,36 +916,40 @@ int xc_domain_restore(int xc_handle, int
   7.622          unsigned long *pfntab;
   7.623          int nr_frees, rc;
   7.624  
   7.625 -        if (!read_exact(io_fd, &count, sizeof(count))) {
   7.626 +        if ( !read_exact(io_fd, &count, sizeof(count)) )
   7.627 +        {
   7.628              ERROR("Error when reading pfn count");
   7.629              goto out;
   7.630          }
   7.631  
   7.632 -        if(!(pfntab = malloc(sizeof(unsigned long) * count))) {
   7.633 +        if ( !(pfntab = malloc(sizeof(unsigned long) * count)) )
   7.634 +        {
   7.635              ERROR("Out of memory");
   7.636              goto out;
   7.637          }
   7.638  
   7.639 -        if (!read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) {
   7.640 +        if ( !read_exact(io_fd, pfntab, sizeof(unsigned long)*count) )
   7.641 +        {
   7.642              ERROR("Error when reading pfntab");
   7.643              goto out;
   7.644          }
   7.645  
   7.646          nr_frees = 0; 
   7.647 -        for (i = 0; i < count; i++) {
   7.648 -
   7.649 +        for ( i = 0; i < count; i++ )
   7.650 +        {
   7.651              unsigned long pfn = pfntab[i];
   7.652  
   7.653 -            if(p2m[pfn] != INVALID_P2M_ENTRY) {
   7.654 +            if ( p2m[pfn] != INVALID_P2M_ENTRY )
   7.655 +            {
   7.656                  /* pfn is not in physmap now, but was at some point during 
   7.657                     the save/migration process - need to free it */
   7.658                  pfntab[nr_frees++] = p2m[pfn];
   7.659 -                p2m[pfn]  = INVALID_P2M_ENTRY; // not in pseudo-physical map
   7.660 +                p2m[pfn]  = INVALID_P2M_ENTRY; /* not in pseudo-physical map */
   7.661              }
   7.662          }
   7.663  
   7.664 -        if (nr_frees > 0) {
   7.665 -
   7.666 +        if ( nr_frees > 0 )
   7.667 +        {
   7.668              struct xen_memory_reservation reservation = {
   7.669                  .nr_extents   = nr_frees,
   7.670                  .extent_order = 0,
   7.671 @@ -913,20 +957,24 @@ int xc_domain_restore(int xc_handle, int
   7.672              };
   7.673              set_xen_guest_handle(reservation.extent_start, pfntab);
   7.674  
   7.675 -            if ((rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
   7.676 -                                   &reservation)) != nr_frees) {
   7.677 +            if ( (rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
   7.678 +                                    &reservation)) != nr_frees )
   7.679 +            {
   7.680                  ERROR("Could not decrease reservation : %d", rc);
   7.681                  goto out;
   7.682 -            } else
   7.683 +            }
   7.684 +            else
   7.685                  DPRINTF("Decreased reservation by %d pages\n", count);
   7.686          }
   7.687      }
   7.688  
   7.689 -    for (i = 0; i <= max_vcpu_id; i++) {
   7.690 -        if (!(vcpumap & (1ULL << i)))
   7.691 +    for ( i = 0; i <= max_vcpu_id; i++ )
   7.692 +    {
   7.693 +        if ( !(vcpumap & (1ULL << i)) )
   7.694              continue;
   7.695  
   7.696 -        if (!read_exact(io_fd, &ctxt, sizeof(ctxt))) {
   7.697 +        if ( !read_exact(io_fd, &ctxt, sizeof(ctxt)) )
   7.698 +        {
   7.699              ERROR("Error when reading ctxt %d", i);
   7.700              goto out;
   7.701          }
   7.702 @@ -934,14 +982,16 @@ int xc_domain_restore(int xc_handle, int
   7.703          if ( !new_ctxt_format )
   7.704              ctxt.flags |= VGCF_online;
   7.705  
   7.706 -        if (i == 0) {
   7.707 +        if ( i == 0 )
   7.708 +        {
   7.709              /*
   7.710               * Uncanonicalise the suspend-record frame number and poke
   7.711               * resume record.
   7.712               */
   7.713              pfn = ctxt.user_regs.edx;
   7.714 -            if ((pfn >= p2m_size) ||
   7.715 -                (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB)) {
   7.716 +            if ( (pfn >= p2m_size) ||
   7.717 +                 (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
   7.718 +            {
   7.719                  ERROR("Suspend record frame number is bad");
   7.720                  goto out;
   7.721              }
   7.722 @@ -960,15 +1010,18 @@ int xc_domain_restore(int xc_handle, int
   7.723          }
   7.724  
   7.725          /* Uncanonicalise each GDT frame number. */
   7.726 -        if (ctxt.gdt_ents > 8192) {
   7.727 +        if ( ctxt.gdt_ents > 8192 )
   7.728 +        {
   7.729              ERROR("GDT entry count out of range");
   7.730              goto out;
   7.731          }
   7.732  
   7.733 -        for (j = 0; (512*j) < ctxt.gdt_ents; j++) {
   7.734 +        for ( j = 0; (512*j) < ctxt.gdt_ents; j++ )
   7.735 +        {
   7.736              pfn = ctxt.gdt_frames[j];
   7.737 -            if ((pfn >= p2m_size) ||
   7.738 -                (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB)) {
   7.739 +            if ( (pfn >= p2m_size) ||
   7.740 +                 (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
   7.741 +            {
   7.742                  ERROR("GDT frame number is bad");
   7.743                  goto out;
   7.744              }
   7.745 @@ -978,14 +1031,16 @@ int xc_domain_restore(int xc_handle, int
   7.746          /* Uncanonicalise the page table base pointer. */
   7.747          pfn = xen_cr3_to_pfn(ctxt.ctrlreg[3]);
   7.748  
   7.749 -        if (pfn >= p2m_size) {
   7.750 +        if ( pfn >= p2m_size )
   7.751 +        {
   7.752              ERROR("PT base is bad: pfn=%lu p2m_size=%lu type=%08lx",
   7.753                    pfn, p2m_size, pfn_type[pfn]);
   7.754              goto out;
   7.755          }
   7.756  
   7.757          if ( (pfn_type[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
   7.758 -             ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) ) {
   7.759 +             ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
   7.760 +        {
   7.761              ERROR("PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
   7.762                    pfn, p2m_size, pfn_type[pfn],
   7.763                    (unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
   7.764 @@ -999,14 +1054,16 @@ int xc_domain_restore(int xc_handle, int
   7.765          {
   7.766              pfn = xen_cr3_to_pfn(ctxt.ctrlreg[1]);
   7.767  
   7.768 -            if (pfn >= p2m_size) {
   7.769 +            if ( pfn >= p2m_size )
   7.770 +            {
   7.771                  ERROR("User PT base is bad: pfn=%lu p2m_size=%lu type=%08lx",
   7.772                        pfn, p2m_size, pfn_type[pfn]);
   7.773                  goto out;
   7.774              }
   7.775  
   7.776              if ( (pfn_type[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
   7.777 -                 ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) ) {
   7.778 +                 ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
   7.779 +            {
   7.780                  ERROR("User PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
   7.781                        pfn, p2m_size, pfn_type[pfn],
   7.782                        (unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
   7.783 @@ -1021,14 +1078,16 @@ int xc_domain_restore(int xc_handle, int
   7.784          domctl.u.vcpucontext.vcpu = i;
   7.785          set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt);
   7.786          rc = xc_domctl(xc_handle, &domctl);
   7.787 -        if (rc != 0) {
   7.788 +        if ( rc != 0 )
   7.789 +        {
   7.790              ERROR("Couldn't build vcpu%d", i);
   7.791              goto out;
   7.792          }
   7.793          rc = 1;
   7.794      }
   7.795  
   7.796 -    if (!read_exact(io_fd, shared_info_page, PAGE_SIZE)) {
   7.797 +    if ( !read_exact(io_fd, shared_info_page, PAGE_SIZE) )
   7.798 +    {
   7.799          ERROR("Error when reading shared info page");
   7.800          goto out;
   7.801      }
   7.802 @@ -1046,9 +1105,11 @@ int xc_domain_restore(int xc_handle, int
   7.803      munmap(page, PAGE_SIZE);
   7.804  
   7.805      /* Uncanonicalise the pfn-to-mfn table frame-number list. */
   7.806 -    for (i = 0; i < P2M_FL_ENTRIES; i++) {
   7.807 +    for ( i = 0; i < P2M_FL_ENTRIES; i++ )
   7.808 +    {
   7.809          pfn = p2m_frame_list[i];
   7.810 -        if ((pfn >= p2m_size) || (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB)) {
   7.811 +        if ( (pfn >= p2m_size) || (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
   7.812 +        {
   7.813              ERROR("PFN-to-MFN frame number is bad");
   7.814              goto out;
   7.815          }
   7.816 @@ -1057,8 +1118,9 @@ int xc_domain_restore(int xc_handle, int
   7.817      }
   7.818  
   7.819      /* Copy the P2M we've constructed to the 'live' P2M */
   7.820 -    if (!(live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE,
   7.821 -                                          p2m_frame_list, P2M_FL_ENTRIES))) {
   7.822 +    if ( !(live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE,
   7.823 +                                           p2m_frame_list, P2M_FL_ENTRIES)) )
   7.824 +    {
   7.825          ERROR("Couldn't map p2m table");
   7.826          goto out;
   7.827      }
     8.1 --- a/tools/libxc/xc_hvm_save.c	Mon Apr 09 13:39:35 2007 -0600
     8.2 +++ b/tools/libxc/xc_hvm_save.c	Mon Apr 09 13:40:25 2007 -0600
     8.3 @@ -119,8 +119,8 @@ static uint64_t llgettimeofday(void)
     8.4  
     8.5  static uint64_t tv_delta(struct timeval *new, struct timeval *old)
     8.6  {
     8.7 -    return ((new->tv_sec - old->tv_sec)*1000000 ) +
     8.8 -        (new->tv_usec - old->tv_usec);
     8.9 +    return (((new->tv_sec - old->tv_sec)*1000000) +
    8.10 +            (new->tv_usec - old->tv_usec));
    8.11  }
    8.12  
    8.13  
    8.14 @@ -130,9 +130,7 @@ static uint64_t tv_delta(struct timeval 
    8.15  
    8.16  static inline ssize_t write_exact(int fd, void *buf, size_t count)
    8.17  {
    8.18 -    if(write(fd, buf, count) != count)
    8.19 -        return 0;
    8.20 -    return 1;
    8.21 +    return (write(fd, buf, count) == count);
    8.22  }
    8.23  
    8.24  static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
    8.25 @@ -156,15 +154,14 @@ static int print_stats(int xc_handle, ui
    8.26          DPRINTF("ARRHHH!!\n");
    8.27  
    8.28      wall_delta = tv_delta(&wall_now,&wall_last)/1000;
    8.29 -
    8.30 -    if (wall_delta == 0) wall_delta = 1;
    8.31 +    if ( wall_delta == 0 )
    8.32 +        wall_delta = 1;
    8.33  
    8.34      d0_cpu_delta = (d0_cpu_now - d0_cpu_last)/1000;
    8.35      d1_cpu_delta = (d1_cpu_now - d1_cpu_last)/1000;
    8.36  
    8.37 -    if (print)
    8.38 -        DPRINTF(
    8.39 -                "delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
    8.40 +    if ( print )
    8.41 +        DPRINTF("delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
    8.42                  "dirtied %dMb/s %" PRId32 " pages\n",
    8.43                  wall_delta,
    8.44                  (int)((d0_cpu_delta*100)/wall_delta),
    8.45 @@ -189,18 +186,19 @@ static int analysis_phase(int xc_handle,
    8.46  
    8.47      start = llgettimeofday();
    8.48  
    8.49 -    for (j = 0; j < runs; j++) {
    8.50 +    for ( j = 0; j < runs; j++ )
    8.51 +    {
    8.52          int i;
    8.53  
    8.54          xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
    8.55                            arr, pfn_array_size, NULL, 0, NULL);
    8.56          DPRINTF("#Flush\n");
    8.57 -        for ( i = 0; i < 40; i++ ) {
    8.58 +        for ( i = 0; i < 40; i++ )
    8.59 +        {
    8.60              usleep(50000);
    8.61              now = llgettimeofday();
    8.62              xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_PEEK,
    8.63                                NULL, 0, NULL, 0, &stats);
    8.64 -
    8.65              DPRINTF("now= %lld faults= %"PRId32" dirty= %"PRId32"\n",
    8.66                      ((now-start)+500)/1000,
    8.67                      stats.fault_count, stats.dirty_count);
    8.68 @@ -216,39 +214,39 @@ static int suspend_and_state(int (*suspe
    8.69  {
    8.70      int i = 0;
    8.71  
    8.72 -    if (!(*suspend)(dom)) {
    8.73 +    if ( !(*suspend)(dom) )
    8.74 +    {
    8.75          ERROR("Suspend request failed");
    8.76          return -1;
    8.77      }
    8.78  
    8.79   retry:
    8.80  
    8.81 -    if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) {
    8.82 +    if ( xc_domain_getinfo(xc_handle, dom, 1, info) != 1 )
    8.83 +    {
    8.84          ERROR("Could not get domain info");
    8.85          return -1;
    8.86      }
    8.87  
    8.88 -    if ( xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, ctxt))
    8.89 +    if ( xc_vcpu_getcontext(xc_handle, dom, 0, ctxt) )
    8.90          ERROR("Could not get vcpu context");
    8.91  
    8.92 -
    8.93 -    if (info->shutdown && info->shutdown_reason == SHUTDOWN_suspend)
    8.94 -        return 0; // success        
    8.95 +    if ( info->shutdown && (info->shutdown_reason == SHUTDOWN_suspend) )
    8.96 +        return 0; /* success */
    8.97  
    8.98 -    if (info->paused) {
    8.99 -        // try unpausing domain, wait, and retest
   8.100 +    if ( info->paused )
   8.101 +    {
   8.102 +        /* Try unpausing domain, wait, and retest. */
   8.103          xc_domain_unpause( xc_handle, dom );
   8.104 -
   8.105          ERROR("Domain was paused. Wait and re-test.");
   8.106 -        usleep(10000);  // 10ms
   8.107 -
   8.108 +        usleep(10000);  /* 10ms */
   8.109          goto retry;
   8.110      }
   8.111  
   8.112 -
   8.113 -    if( ++i < 100 ) {
   8.114 +    if ( ++i < 100 )
   8.115 +    {
   8.116          ERROR("Retry suspend domain.");
   8.117 -        usleep(10000);  // 10ms
   8.118 +        usleep(10000); /* 10ms */
   8.119          goto retry;
   8.120      }
   8.121  
   8.122 @@ -350,8 +348,6 @@ int xc_hvm_save(int xc_handle, int io_fd
   8.123              ERROR("Couldn't enable shadow mode");
   8.124              goto out;
   8.125          }
   8.126 -
   8.127 -        DPRINTF("hvm domain live migration debug start: logdirty enable.\n");
   8.128      }
   8.129      else
   8.130      {
   8.131 @@ -378,7 +374,6 @@ int xc_hvm_save(int xc_handle, int io_fd
   8.132          ERROR("Error when writing to state file (1)");
   8.133          goto out;
   8.134      }
   8.135 -    
   8.136  
   8.137      /* pretend we sent all the pages last iteration */
   8.138      sent_last_iter = pfn_array_size;
   8.139 @@ -452,7 +447,7 @@ int xc_hvm_save(int xc_handle, int io_fd
   8.140          prev_pc = 0;
   8.141          N=0;
   8.142  
   8.143 -        DPRINTF("Saving HVM domain memory pages: iter %d   0%%", iter);
   8.144 +        DPRINTF("Saving memory pages: iter %d   0%%", iter);
   8.145  
   8.146          while ( N < pfn_array_size )
   8.147          {
   8.148 @@ -474,7 +469,7 @@ int xc_hvm_save(int xc_handle, int io_fd
   8.149                      pfn_array_size, NULL, 0, NULL);
   8.150                  if ( rc != pfn_array_size )
   8.151                  {
   8.152 -                    ERROR("Error peeking HVM shadow bitmap");
   8.153 +                    ERROR("Error peeking shadow bitmap");
   8.154                      goto out;
   8.155                  }
   8.156              }
   8.157 @@ -703,7 +698,7 @@ int xc_hvm_save(int xc_handle, int io_fd
   8.158      /* save vcpu/vmcs contexts */
   8.159      for ( i = 0; i < nr_vcpus; i++ )
   8.160      {
   8.161 -        if (!(vcpumap & (1ULL << i)))
   8.162 +        if ( !(vcpumap & (1ULL << i)) )
   8.163              continue;
   8.164  
   8.165          if ( xc_vcpu_getcontext(xc_handle, dom, i, &ctxt) )
     9.1 --- a/tools/libxc/xc_linux_save.c	Mon Apr 09 13:39:35 2007 -0600
     9.2 +++ b/tools/libxc/xc_linux_save.c	Mon Apr 09 13:40:25 2007 -0600
     9.3 @@ -27,7 +27,6 @@
     9.4  #define DEF_MAX_ITERS   29   /* limit us to 30 times round loop   */
     9.5  #define DEF_MAX_FACTOR   3   /* never send more than 3x p2m_size  */
     9.6  
     9.7 -
     9.8  /* max mfn of the whole machine */
     9.9  static unsigned long max_mfn;
    9.10  
    9.11 @@ -56,10 +55,9 @@ static unsigned long m2p_mfn0;
    9.12   * in the guest's pseudophysical map.
    9.13   */
    9.14  #define MFN_IS_IN_PSEUDOPHYS_MAP(_mfn)          \
    9.15 -(((_mfn) < (max_mfn)) &&                        \
    9.16 - ((mfn_to_pfn(_mfn) < (p2m_size)) &&               \
    9.17 -  (live_p2m[mfn_to_pfn(_mfn)] == (_mfn))))
    9.18 -
    9.19 +    (((_mfn) < (max_mfn)) &&                    \
    9.20 +     ((mfn_to_pfn(_mfn) < (p2m_size)) &&        \
    9.21 +      (live_p2m[mfn_to_pfn(_mfn)] == (_mfn))))
    9.22  
    9.23  /* Returns TRUE if MFN is successfully converted to a PFN. */
    9.24  #define translate_mfn_to_pfn(_pmfn)                             \
    9.25 @@ -116,7 +114,7 @@ static inline int count_bits ( int nr, v
    9.26      int i, count = 0;
    9.27      volatile unsigned long *p = (volatile unsigned long *)addr;
    9.28      /* We know that the array is padded to unsigned long. */
    9.29 -    for( i = 0; i < (nr / (sizeof(unsigned long)*8)); i++, p++ )
    9.30 +    for ( i = 0; i < (nr / (sizeof(unsigned long)*8)); i++, p++ )
    9.31          count += hweight32(*p);
    9.32      return count;
    9.33  }
    9.34 @@ -151,9 +149,6 @@ static inline int permute( int i, int nr
    9.35      return i;
    9.36  }
    9.37  
    9.38 -
    9.39 -
    9.40 -
    9.41  static uint64_t tv_to_us(struct timeval *new)
    9.42  {
    9.43      return (new->tv_sec * 1000000) + new->tv_usec;
    9.44 @@ -168,8 +163,8 @@ static uint64_t llgettimeofday(void)
    9.45  
    9.46  static uint64_t tv_delta(struct timeval *new, struct timeval *old)
    9.47  {
    9.48 -    return ((new->tv_sec - old->tv_sec)*1000000 ) +
    9.49 -        (new->tv_usec - old->tv_usec);
    9.50 +    return (((new->tv_sec - old->tv_sec)*1000000) +
    9.51 +            (new->tv_usec - old->tv_usec));
    9.52  }
    9.53  
    9.54  static int noncached_write(int fd, int live, void *buffer, int len) 
    9.55 @@ -179,8 +174,8 @@ static int noncached_write(int fd, int l
    9.56      int rc = write(fd,buffer,len);
    9.57  
    9.58      write_count += len;
    9.59 -
    9.60 -    if (write_count >= MAX_PAGECACHE_USAGE*PAGE_SIZE) {
    9.61 +    if ( write_count >= (MAX_PAGECACHE_USAGE * PAGE_SIZE) )
    9.62 +    {
    9.63          /* Time to discard cache - dont care if this fails */
    9.64          discard_file_cache(fd, 0 /* no flush */);
    9.65          write_count = 0;
    9.66 @@ -191,7 +186,6 @@ static int noncached_write(int fd, int l
    9.67  
    9.68  #ifdef ADAPTIVE_SAVE
    9.69  
    9.70 -
    9.71  /*
    9.72  ** We control the rate at which we transmit (or save) to minimize impact
    9.73  ** on running domains (including the target if we're doing live migrate).
    9.74 @@ -200,27 +194,23 @@ static int noncached_write(int fd, int l
    9.75  #define MAX_MBIT_RATE    500      /* maximum transmit rate for migrate */
    9.76  #define START_MBIT_RATE  100      /* initial transmit rate for migrate */
    9.77  
    9.78 -
    9.79  /* Scaling factor to convert between a rate (in Mb/s) and time (in usecs) */
    9.80  #define RATE_TO_BTU      781250
    9.81  
    9.82  /* Amount in bytes we allow ourselves to send in a burst */
    9.83  #define BURST_BUDGET (100*1024)
    9.84  
    9.85 -
    9.86  /* We keep track of the current and previous transmission rate */
    9.87  static int mbit_rate, ombit_rate = 0;
    9.88  
    9.89  /* Have we reached the maximum transmission rate? */
    9.90  #define RATE_IS_MAX() (mbit_rate == MAX_MBIT_RATE)
    9.91  
    9.92 -
    9.93  static inline void initialize_mbit_rate()
    9.94  {
    9.95      mbit_rate = START_MBIT_RATE;
    9.96  }
    9.97  
    9.98 -
    9.99  static int ratewrite(int io_fd, int live, void *buf, int n)
   9.100  {
   9.101      static int budget = 0;
   9.102 @@ -230,39 +220,47 @@ static int ratewrite(int io_fd, int live
   9.103      struct timespec delay;
   9.104      long long delta;
   9.105  
   9.106 -    if (START_MBIT_RATE == 0)
   9.107 +    if ( START_MBIT_RATE == 0 )
   9.108          return noncached_write(io_fd, live, buf, n);
   9.109  
   9.110      budget -= n;
   9.111 -    if (budget < 0) {
   9.112 -        if (mbit_rate != ombit_rate) {
   9.113 +    if ( budget < 0 )
   9.114 +    {
   9.115 +        if ( mbit_rate != ombit_rate )
   9.116 +        {
   9.117              burst_time_us = RATE_TO_BTU / mbit_rate;
   9.118              ombit_rate = mbit_rate;
   9.119              DPRINTF("rate limit: %d mbit/s burst budget %d slot time %d\n",
   9.120                      mbit_rate, BURST_BUDGET, burst_time_us);
   9.121          }
   9.122 -        if (last_put.tv_sec == 0) {
   9.123 +        if ( last_put.tv_sec == 0 )
   9.124 +        {
   9.125              budget += BURST_BUDGET;
   9.126              gettimeofday(&last_put, NULL);
   9.127 -        } else {
   9.128 -            while (budget < 0) {
   9.129 +        }
   9.130 +        else
   9.131 +        {
   9.132 +            while ( budget < 0 )
   9.133 +            {
   9.134                  gettimeofday(&now, NULL);
   9.135                  delta = tv_delta(&now, &last_put);
   9.136 -                while (delta > burst_time_us) {
   9.137 +                while ( delta > burst_time_us )
   9.138 +                {
   9.139                      budget += BURST_BUDGET;
   9.140                      last_put.tv_usec += burst_time_us;
   9.141 -                    if (last_put.tv_usec > 1000000) {
    9.142 +                    if ( last_put.tv_usec > 1000000 )
   9.143 +                    {
   9.144                          last_put.tv_usec -= 1000000;
   9.145                          last_put.tv_sec++;
   9.146                      }
   9.147                      delta -= burst_time_us;
   9.148                  }
   9.149 -                if (budget > 0)
   9.150 +                if ( budget > 0 )
   9.151                      break;
   9.152                  delay.tv_sec = 0;
   9.153                  delay.tv_nsec = 1000 * (burst_time_us - delta);
   9.154 -                while (delay.tv_nsec > 0)
   9.155 -                    if (nanosleep(&delay, &delay) == 0)
   9.156 +                while ( delay.tv_nsec > 0 )
   9.157 +                    if ( nanosleep(&delay, &delay) == 0 )
   9.158                          break;
   9.159              }
   9.160          }
   9.161 @@ -278,16 +276,11 @@ static int ratewrite(int io_fd, int live
   9.162  
   9.163  #endif
   9.164  
   9.165 -
   9.166  static inline ssize_t write_exact(int fd, void *buf, size_t count)
   9.167  {
   9.168 -    if(write(fd, buf, count) != count)
   9.169 -        return 0;
   9.170 -    return 1;
   9.171 +    return (write(fd, buf, count) == count);
   9.172  }
   9.173  
   9.174 -
   9.175 -
   9.176  static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
   9.177                         xc_shadow_op_stats_t *stats, int print)
   9.178  {
   9.179 @@ -309,15 +302,14 @@ static int print_stats(int xc_handle, ui
   9.180          DPRINTF("ARRHHH!!\n");
   9.181  
   9.182      wall_delta = tv_delta(&wall_now,&wall_last)/1000;
   9.183 -
   9.184 -    if (wall_delta == 0) wall_delta = 1;
   9.185 +    if ( wall_delta == 0 )
   9.186 +        wall_delta = 1;
   9.187  
   9.188      d0_cpu_delta = (d0_cpu_now - d0_cpu_last)/1000;
   9.189      d1_cpu_delta = (d1_cpu_now - d1_cpu_last)/1000;
   9.190  
   9.191 -    if (print)
   9.192 -        DPRINTF(
   9.193 -                "delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
   9.194 +    if ( print )
   9.195 +        DPRINTF("delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
   9.196                  "dirtied %dMb/s %" PRId32 " pages\n",
   9.197                  wall_delta,
   9.198                  (int)((d0_cpu_delta*100)/wall_delta),
   9.199 @@ -327,10 +319,11 @@ static int print_stats(int xc_handle, ui
   9.200                  stats->dirty_count);
   9.201  
   9.202  #ifdef ADAPTIVE_SAVE
   9.203 -    if (((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))) > mbit_rate) {
   9.204 +    if ( ((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))) > mbit_rate )
   9.205 +    {
   9.206          mbit_rate = (int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8)))
   9.207              + 50;
   9.208 -        if (mbit_rate > MAX_MBIT_RATE)
   9.209 +        if ( mbit_rate > MAX_MBIT_RATE )
   9.210              mbit_rate = MAX_MBIT_RATE;
   9.211      }
   9.212  #endif
   9.213 @@ -352,18 +345,19 @@ static int analysis_phase(int xc_handle,
   9.214  
   9.215      start = llgettimeofday();
   9.216  
   9.217 -    for (j = 0; j < runs; j++) {
   9.218 +    for ( j = 0; j < runs; j++ )
   9.219 +    {
   9.220          int i;
   9.221  
   9.222          xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
   9.223                            arr, p2m_size, NULL, 0, NULL);
   9.224          DPRINTF("#Flush\n");
   9.225 -        for ( i = 0; i < 40; i++ ) {
   9.226 +        for ( i = 0; i < 40; i++ )
   9.227 +        {
   9.228              usleep(50000);
   9.229              now = llgettimeofday();
   9.230              xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_PEEK,
   9.231                                NULL, 0, NULL, 0, &stats);
   9.232 -
   9.233              DPRINTF("now= %lld faults= %"PRId32" dirty= %"PRId32"\n",
   9.234                      ((now-start)+500)/1000,
   9.235                      stats.fault_count, stats.dirty_count);
   9.236 @@ -380,34 +374,40 @@ static int suspend_and_state(int (*suspe
   9.237  {
   9.238      int i = 0;
   9.239  
   9.240 -    if (!(*suspend)(dom)) {
   9.241 +    if ( !(*suspend)(dom) )
   9.242 +    {
   9.243          ERROR("Suspend request failed");
   9.244          return -1;
   9.245      }
   9.246  
   9.247   retry:
   9.248  
   9.249 -    if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) {
   9.250 +    if ( xc_domain_getinfo(xc_handle, dom, 1, info) != 1 )
   9.251 +    {
   9.252          ERROR("Could not get domain info");
   9.253          return -1;
   9.254      }
   9.255  
   9.256 -    if ( xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, ctxt))
   9.257 +    if ( xc_vcpu_getcontext(xc_handle, dom, 0, ctxt) )
   9.258          ERROR("Could not get vcpu context");
   9.259  
   9.260  
   9.261 -    if (info->dying) {
   9.262 +    if ( info->dying )
   9.263 +    {
   9.264          ERROR("domain is dying");
   9.265          return -1;
   9.266      }
   9.267  
   9.268 -    if (info->crashed) {
   9.269 +    if ( info->crashed )
   9.270 +    {
   9.271          ERROR("domain has crashed");
   9.272          return -1;
   9.273      }
   9.274  
   9.275 -    if (info->shutdown) {
   9.276 -        switch (info->shutdown_reason) {
   9.277 +    if ( info->shutdown )
   9.278 +    {
   9.279 +        switch ( info->shutdown_reason )
   9.280 +        {
   9.281          case SHUTDOWN_poweroff:
   9.282          case SHUTDOWN_reboot:
   9.283              ERROR("domain has shut down");
   9.284 @@ -420,20 +420,19 @@ static int suspend_and_state(int (*suspe
   9.285          }
   9.286      }
   9.287  
   9.288 -    if (info->paused) {
   9.289 -        // try unpausing domain, wait, and retest
   9.290 +    if ( info->paused )
   9.291 +    {
   9.292 +        /* Try unpausing domain, wait, and retest. */
   9.293          xc_domain_unpause( xc_handle, dom );
   9.294 -
   9.295          ERROR("Domain was paused. Wait and re-test.");
   9.296 -        usleep(10000);  // 10ms
   9.297 -
   9.298 +        usleep(10000); /* 10ms */
   9.299          goto retry;
   9.300      }
   9.301  
   9.302 -
   9.303 -    if( ++i < 100 ) {
   9.304 +    if ( ++i < 100 )
   9.305 +    {
   9.306          ERROR("Retry suspend domain");
   9.307 -        usleep(10000);  // 10ms
   9.308 +        usleep(10000); /* 10ms */
   9.309          goto retry;
   9.310      }
   9.311  
   9.312 @@ -453,18 +452,18 @@ static void *map_frame_list_list(int xc_
   9.313      int count = 100;
   9.314      void *p;
   9.315  
   9.316 -    while (count-- && shinfo->arch.pfn_to_mfn_frame_list_list == 0)
   9.317 +    while ( count-- && (shinfo->arch.pfn_to_mfn_frame_list_list == 0) )
   9.318          usleep(10000);
   9.319  
   9.320 -    if (shinfo->arch.pfn_to_mfn_frame_list_list == 0) {
   9.321 +    if ( shinfo->arch.pfn_to_mfn_frame_list_list == 0 )
   9.322 +    {
   9.323          ERROR("Timed out waiting for frame list updated.");
   9.324          return NULL;
   9.325      }
   9.326  
   9.327      p = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ,
   9.328                               shinfo->arch.pfn_to_mfn_frame_list_list);
   9.329 -
   9.330 -    if (p == NULL)
   9.331 +    if ( p == NULL )
   9.332          ERROR("Couldn't map p2m_frame_list_list (errno %d)", errno);
   9.333  
   9.334      return p;
   9.335 @@ -493,10 +492,10 @@ static int canonicalize_pagetable(unsign
   9.336      */
   9.337      xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2) ? 4 : 8);
   9.338  
   9.339 -    if (pt_levels == 2 && type == XEN_DOMCTL_PFINFO_L2TAB)
   9.340 +    if ( (pt_levels == 2) && (type == XEN_DOMCTL_PFINFO_L2TAB) )
   9.341          xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT);
   9.342  
   9.343 -    if (pt_levels == 3 && type == XEN_DOMCTL_PFINFO_L3TAB)
   9.344 +    if ( (pt_levels == 3) && (type == XEN_DOMCTL_PFINFO_L3TAB) )
   9.345          xen_start = L3_PAGETABLE_ENTRIES_PAE;
   9.346  
   9.347      /*
   9.348 @@ -505,27 +504,31 @@ static int canonicalize_pagetable(unsign
   9.349      ** Xen always ensures is present in that L2. Guests must ensure
   9.350      ** that this check will fail for other L2s.
   9.351      */
   9.352 -    if (pt_levels == 3 && type == XEN_DOMCTL_PFINFO_L2TAB) {
   9.353 +    if ( (pt_levels == 3) && (type == XEN_DOMCTL_PFINFO_L2TAB) )
   9.354 +    {
   9.355          int hstart;
   9.356          uint64_t he;
   9.357  
   9.358          hstart = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
   9.359          he = ((const uint64_t *) spage)[hstart];
   9.360  
   9.361 -        if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86) == m2p_mfn0 ) {
   9.362 +        if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86) == m2p_mfn0 )
   9.363 +        {
   9.364              /* hvirt starts with xen stuff... */
   9.365              xen_start = hstart;
   9.366 -        } else if ( hvirt_start != 0xf5800000 ) {
   9.367 +        }
   9.368 +        else if ( hvirt_start != 0xf5800000 )
   9.369 +        {
   9.370              /* old L2s from before hole was shrunk... */
   9.371              hstart = (0xf5800000 >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
   9.372              he = ((const uint64_t *) spage)[hstart];
   9.373 -
   9.374 -            if( ((he >> PAGE_SHIFT) & MFN_MASK_X86) == m2p_mfn0 )
   9.375 +            if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86) == m2p_mfn0 )
   9.376                  xen_start = hstart;
   9.377          }
   9.378      }
   9.379  
   9.380 -    if (pt_levels == 4 && type == XEN_DOMCTL_PFINFO_L4TAB) {
   9.381 +    if ( (pt_levels == 4) && (type == XEN_DOMCTL_PFINFO_L4TAB) )
   9.382 +    {
   9.383          /*
   9.384          ** XXX SMH: should compute these from hvirt_start (which we have)
   9.385          ** and hvirt_end (which we don't)
   9.386 @@ -535,27 +538,29 @@ static int canonicalize_pagetable(unsign
   9.387      }
   9.388  
   9.389      /* Now iterate through the page table, canonicalizing each PTE */
   9.390 -    for (i = 0; i < pte_last; i++ ) {
   9.391 -
   9.392 +    for (i = 0; i < pte_last; i++ )
   9.393 +    {
   9.394          unsigned long pfn, mfn;
   9.395  
   9.396 -        if (pt_levels == 2)
   9.397 +        if ( pt_levels == 2 )
   9.398              pte = ((const uint32_t*)spage)[i];
   9.399          else
   9.400              pte = ((const uint64_t*)spage)[i];
   9.401  
   9.402 -        if (i >= xen_start && i < xen_end)
   9.403 +        if ( (i >= xen_start) && (i < xen_end) )
   9.404              pte = 0;
   9.405  
   9.406 -        if (pte & _PAGE_PRESENT) {
   9.407 -
   9.408 +        if ( pte & _PAGE_PRESENT )
   9.409 +        {
   9.410              mfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;
   9.411 -            if (!MFN_IS_IN_PSEUDOPHYS_MAP(mfn)) {
   9.412 +            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
   9.413 +            {
   9.414                  /* This will happen if the type info is stale which
   9.415                     is quite feasible under live migration */
   9.416                  pfn  = 0;  /* zap it - we'll retransmit this page later */
   9.417                  race = 1;  /* inform the caller of race; fatal if !live */ 
   9.418 -            } else
   9.419 +            }
   9.420 +            else
   9.421                  pfn = mfn_to_pfn(mfn);
   9.422  
   9.423              pte &= ~MADDR_MASK_X86;
   9.424 @@ -566,26 +571,21 @@ static int canonicalize_pagetable(unsign
   9.425               * a 64bit hypervisor. We zap these here to avoid any
   9.426               * surprise at restore time...
   9.427               */
   9.428 -            if ( pt_levels == 3 &&
   9.429 -                 type == XEN_DOMCTL_PFINFO_L3TAB &&
   9.430 -                 pte & (_PAGE_USER|_PAGE_RW|_PAGE_ACCESSED) )
   9.431 -            {
   9.432 +            if ( (pt_levels == 3) &&
   9.433 +                 (type == XEN_DOMCTL_PFINFO_L3TAB) &&
   9.434 +                 (pte & (_PAGE_USER|_PAGE_RW|_PAGE_ACCESSED)) )
   9.435                  pte &= ~(_PAGE_USER|_PAGE_RW|_PAGE_ACCESSED);
   9.436 -            }
   9.437          }
   9.438  
   9.439 -        if (pt_levels == 2)
   9.440 +        if ( pt_levels == 2 )
   9.441              ((uint32_t*)dpage)[i] = pte;
   9.442          else
   9.443              ((uint64_t*)dpage)[i] = pte;
   9.444 -
   9.445      }
   9.446  
   9.447 -    return race; 
   9.448 +    return race;
   9.449  }
   9.450  
   9.451 -
   9.452 -
   9.453  static xen_pfn_t *xc_map_m2p(int xc_handle,
   9.454                                   unsigned long max_mfn,
   9.455                                   int prot)
   9.456 @@ -601,37 +601,43 @@ static xen_pfn_t *xc_map_m2p(int xc_hand
   9.457      m2p_chunks = M2P_CHUNKS(max_mfn);
   9.458  
   9.459      xmml.max_extents = m2p_chunks;
   9.460 -    if (!(extent_start = malloc(m2p_chunks * sizeof(xen_pfn_t)))) {
   9.461 +    if ( !(extent_start = malloc(m2p_chunks * sizeof(xen_pfn_t))) )
   9.462 +    {
   9.463          ERROR("failed to allocate space for m2p mfns");
   9.464          return NULL;
   9.465      }
   9.466      set_xen_guest_handle(xmml.extent_start, extent_start);
   9.467  
   9.468 -    if (xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
   9.469 -        (xmml.nr_extents != m2p_chunks)) {
   9.470 +    if ( xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
   9.471 +         (xmml.nr_extents != m2p_chunks) )
   9.472 +    {
   9.473          ERROR("xc_get_m2p_mfns");
   9.474          return NULL;
   9.475      }
   9.476  
   9.477 -    if ((m2p = mmap(NULL, m2p_size, prot,
   9.478 -                    MAP_SHARED, xc_handle, 0)) == MAP_FAILED) {
   9.479 +    if ( (m2p = mmap(NULL, m2p_size, prot,
   9.480 +                     MAP_SHARED, xc_handle, 0)) == MAP_FAILED )
   9.481 +    {
   9.482          ERROR("failed to mmap m2p");
   9.483          return NULL;
   9.484      }
   9.485  
   9.486 -    if (!(entries = malloc(m2p_chunks * sizeof(privcmd_mmap_entry_t)))) {
   9.487 +    if ( !(entries = malloc(m2p_chunks * sizeof(privcmd_mmap_entry_t))) )
   9.488 +    {
   9.489          ERROR("failed to allocate space for mmap entries");
   9.490          return NULL;
   9.491      }
   9.492  
   9.493 -    for (i=0; i < m2p_chunks; i++) {
   9.494 +    for ( i = 0; i < m2p_chunks; i++ )
   9.495 +    {
   9.496          entries[i].va = (unsigned long)(((void *)m2p) + (i * M2P_CHUNK_SIZE));
   9.497          entries[i].mfn = extent_start[i];
   9.498          entries[i].npages = M2P_CHUNK_SIZE >> PAGE_SHIFT;
   9.499      }
   9.500  
   9.501 -    if ((rc = xc_map_foreign_ranges(xc_handle, DOMID_XEN,
   9.502 -        entries, m2p_chunks)) < 0) {
   9.503 +    if ( (rc = xc_map_foreign_ranges(xc_handle, DOMID_XEN,
   9.504 +                                     entries, m2p_chunks)) < 0 )
   9.505 +    {
   9.506          ERROR("xc_mmap_foreign_ranges failed (rc = %d)", rc);
   9.507          return NULL;
   9.508      }
   9.509 @@ -644,8 +650,6 @@ static xen_pfn_t *xc_map_m2p(int xc_hand
   9.510      return m2p;
   9.511  }
   9.512  
   9.513 -
   9.514 -
   9.515  int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
   9.516                    uint32_t max_factor, uint32_t flags, int (*suspend)(int))
   9.517  {
   9.518 @@ -699,33 +703,35 @@ int xc_linux_save(int xc_handle, int io_
   9.519      uint64_t vcpumap = 1ULL;
   9.520  
   9.521      /* If no explicit control parameters given, use defaults */
   9.522 -    if(!max_iters)
   9.523 -        max_iters = DEF_MAX_ITERS;
   9.524 -    if(!max_factor)
   9.525 -        max_factor = DEF_MAX_FACTOR;
   9.526 +    max_iters  = max_iters  ? : DEF_MAX_ITERS;
   9.527 +    max_factor = max_factor ? : DEF_MAX_FACTOR;
   9.528  
   9.529      initialize_mbit_rate();
   9.530  
   9.531 -    if(!get_platform_info(xc_handle, dom,
   9.532 -                          &max_mfn, &hvirt_start, &pt_levels)) {
   9.533 +    if ( !get_platform_info(xc_handle, dom,
   9.534 +                            &max_mfn, &hvirt_start, &pt_levels) )
   9.535 +    {
   9.536          ERROR("Unable to get platform info.");
   9.537          return 1;
   9.538      }
   9.539  
   9.540 -    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
   9.541 +    if ( xc_domain_getinfo(xc_handle, dom, 1, &info) != 1 )
   9.542 +    {
   9.543          ERROR("Could not get domain info");
   9.544          return 1;
   9.545      }
   9.546  
   9.547 -    if (xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt)) {
   9.548 +    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
   9.549 +    {
   9.550          ERROR("Could not get vcpu context");
   9.551          goto out;
   9.552      }
   9.553      shared_info_frame = info.shared_info_frame;
   9.554  
   9.555      /* Map the shared info frame */
   9.556 -    if(!(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   9.557 -                                            PROT_READ, shared_info_frame))) {
   9.558 +    if ( !(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   9.559 +                                              PROT_READ, shared_info_frame)) )
   9.560 +    {
   9.561          ERROR("Couldn't map live_shinfo");
   9.562          goto out;
   9.563      }
   9.564 @@ -734,16 +740,15 @@ int xc_linux_save(int xc_handle, int io_
   9.565  
   9.566      live_p2m_frame_list_list = map_frame_list_list(xc_handle, dom,
   9.567                                                     live_shinfo);
   9.568 -
   9.569 -    if (!live_p2m_frame_list_list)
   9.570 +    if ( !live_p2m_frame_list_list )
   9.571          goto out;
   9.572  
   9.573      live_p2m_frame_list =
   9.574          xc_map_foreign_batch(xc_handle, dom, PROT_READ,
   9.575                               live_p2m_frame_list_list,
   9.576                               P2M_FLL_ENTRIES);
   9.577 -
   9.578 -    if (!live_p2m_frame_list) {
   9.579 +    if ( !live_p2m_frame_list )
   9.580 +    {
   9.581          ERROR("Couldn't map p2m_frame_list");
   9.582          goto out;
   9.583      }
   9.584 @@ -756,69 +761,70 @@ int xc_linux_save(int xc_handle, int io_
   9.585      live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_READ,
   9.586                                      live_p2m_frame_list,
   9.587                                      P2M_FL_ENTRIES);
   9.588 -
   9.589 -    if (!live_p2m) {
   9.590 +    if ( !live_p2m )
   9.591 +    {
   9.592          ERROR("Couldn't map p2m table");
   9.593          goto out;
   9.594      }
   9.595  
   9.596      /* Setup the mfn_to_pfn table mapping */
   9.597 -    if(!(live_m2p = xc_map_m2p(xc_handle, max_mfn, PROT_READ))) {
   9.598 +    if ( !(live_m2p = xc_map_m2p(xc_handle, max_mfn, PROT_READ)) )
   9.599 +    {
   9.600          ERROR("Failed to map live M2P table");
   9.601          goto out;
   9.602      }
   9.603  
   9.604  
   9.605      /* Get a local copy of the live_P2M_frame_list */
   9.606 -    if(!(p2m_frame_list = malloc(P2M_FL_SIZE))) {
   9.607 +    if ( !(p2m_frame_list = malloc(P2M_FL_SIZE)) )
   9.608 +    {
   9.609          ERROR("Couldn't allocate p2m_frame_list array");
   9.610          goto out;
   9.611      }
   9.612      memcpy(p2m_frame_list, live_p2m_frame_list, P2M_FL_SIZE);
   9.613  
   9.614      /* Canonicalise the pfn-to-mfn table frame-number list. */
   9.615 -    for (i = 0; i < p2m_size; i += fpp) {
   9.616 -        if (!translate_mfn_to_pfn(&p2m_frame_list[i/fpp])) {
   9.617 +    for ( i = 0; i < p2m_size; i += fpp )
   9.618 +    {
   9.619 +        if ( !translate_mfn_to_pfn(&p2m_frame_list[i/fpp]) )
   9.620 +        {
   9.621              ERROR("Frame# in pfn-to-mfn frame list is not in pseudophys");
   9.622              ERROR("entry %d: p2m_frame_list[%ld] is 0x%"PRIx64, i, i/fpp,
   9.623 -                (uint64_t)p2m_frame_list[i/fpp]);
   9.624 +                  (uint64_t)p2m_frame_list[i/fpp]);
   9.625              goto out;
   9.626          }
   9.627      }
   9.628  
   9.629      /* Domain is still running at this point */
   9.630 -    if (live) {
   9.631 -
   9.632 -        if (xc_shadow_control(xc_handle, dom,
   9.633 -                              XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
   9.634 -                              NULL, 0, NULL, 0, NULL) < 0) {
   9.635 +    if ( live )
   9.636 +    {
   9.637 +        /* Live suspend. Enable log-dirty mode. */
   9.638 +        if ( xc_shadow_control(xc_handle, dom,
   9.639 +                               XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
   9.640 +                               NULL, 0, NULL, 0, NULL) < 0 )
   9.641 +        {
   9.642              ERROR("Couldn't enable shadow mode");
   9.643              goto out;
   9.644          }
   9.645 -
   9.646 -        last_iter = 0;
   9.647 -
   9.648 -    } else {
   9.649 -
   9.650 -        /* This is a non-live suspend. Issue the call back to get the
   9.651 -           domain suspended */
   9.652 -
   9.653 -        last_iter = 1;
   9.654 -
   9.655 -        if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info, &ctxt)) {
   9.656 +    }
   9.657 +    else
   9.658 +    {
    9.659 +        /* This is a non-live suspend. Suspend the domain. */
   9.660 +        if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info, &ctxt) )
   9.661 +        {
   9.662              ERROR("Domain appears not to have suspended");
   9.663              goto out;
   9.664          }
   9.665 +    }
   9.666  
   9.667 -    }
   9.668 +    last_iter = !live;
   9.669  
   9.670      /* pretend we sent all the pages last iteration */
   9.671      sent_last_iter = p2m_size;
   9.672  
   9.673 -
   9.674      /* calculate the power of 2 order of p2m_size, e.g.
   9.675         15->4 16->4 17->5 */
   9.676 -    for (i = p2m_size-1, order_nr = 0; i ; i >>= 1, order_nr++)
   9.677 +    for ( i = p2m_size-1, order_nr = 0; i ; i >>= 1, order_nr++ )
   9.678          continue;
   9.679  
   9.680      /* Setup to_send / to_fix and to_skip bitmaps */
   9.681 @@ -826,20 +832,23 @@ int xc_linux_save(int xc_handle, int io_
   9.682      to_fix  = calloc(1, BITMAP_SIZE);
   9.683      to_skip = malloc(BITMAP_SIZE);
   9.684  
   9.685 -    if (!to_send || !to_fix || !to_skip) {
   9.686 +    if ( !to_send || !to_fix || !to_skip )
   9.687 +    {
   9.688          ERROR("Couldn't allocate to_send array");
   9.689          goto out;
   9.690      }
   9.691  
   9.692      memset(to_send, 0xff, BITMAP_SIZE);
   9.693  
   9.694 -    if (lock_pages(to_send, BITMAP_SIZE)) {
   9.695 +    if ( lock_pages(to_send, BITMAP_SIZE) )
   9.696 +    {
   9.697          ERROR("Unable to lock to_send");
   9.698          return 1;
   9.699      }
   9.700  
   9.701      /* (to fix is local only) */
   9.702 -    if (lock_pages(to_skip, BITMAP_SIZE)) {
   9.703 +    if ( lock_pages(to_skip, BITMAP_SIZE) )
   9.704 +    {
   9.705          ERROR("Unable to lock to_skip");
   9.706          return 1;
   9.707      }
   9.708 @@ -849,14 +858,15 @@ int xc_linux_save(int xc_handle, int io_
   9.709      /* We want zeroed memory so use calloc rather than malloc. */
   9.710      pfn_type   = calloc(MAX_BATCH_SIZE, sizeof(*pfn_type));
   9.711      pfn_batch  = calloc(MAX_BATCH_SIZE, sizeof(*pfn_batch));
   9.712 -
   9.713 -    if ((pfn_type == NULL) || (pfn_batch == NULL)) {
   9.714 +    if ( (pfn_type == NULL) || (pfn_batch == NULL) )
   9.715 +    {
   9.716          ERROR("failed to alloc memory for pfn_type and/or pfn_batch arrays");
   9.717          errno = ENOMEM;
   9.718          goto out;
   9.719      }
   9.720  
   9.721 -    if (lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) {
   9.722 +    if ( lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type)) )
   9.723 +    {
   9.724          ERROR("Unable to lock");
   9.725          goto out;
   9.726      }
   9.727 @@ -867,10 +877,11 @@ int xc_linux_save(int xc_handle, int io_
   9.728      {
   9.729          int err=0;
   9.730          unsigned long mfn;
   9.731 -        for (i = 0; i < p2m_size; i++) {
   9.732 -
   9.733 +        for ( i = 0; i < p2m_size; i++ )
   9.734 +        {
   9.735              mfn = live_p2m[i];
   9.736 -            if((mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i)) {
    9.737 +            if ( (mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i) )
   9.738 +            {
   9.739                  DPRINTF("i=0x%x mfn=%lx live_m2p=%lx\n", i,
   9.740                          mfn, mfn_to_pfn(mfn));
   9.741                  err++;
   9.742 @@ -879,10 +890,9 @@ int xc_linux_save(int xc_handle, int io_
   9.743          DPRINTF("Had %d unexplained entries in p2m table\n", err);
   9.744      }
   9.745  
   9.746 -
   9.747      /* Start writing out the saved-domain record. */
   9.748 -
   9.749 -    if (!write_exact(io_fd, &p2m_size, sizeof(unsigned long))) {
   9.750 +    if ( !write_exact(io_fd, &p2m_size, sizeof(unsigned long)) )
   9.751 +    {
   9.752          ERROR("write: p2m_size");
   9.753          goto out;
   9.754      }
   9.755 @@ -892,23 +902,26 @@ int xc_linux_save(int xc_handle, int io_
   9.756       * a PAE guest understands extended CR3 (PDPTs above 4GB). Turns off
   9.757       * slow paths in the restore code.
   9.758       */
   9.759 -    if ((pt_levels == 3) &&
   9.760 -        (ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3))) {
   9.761 +    if ( (pt_levels == 3) &&
   9.762 +         (ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3)) )
   9.763 +    {
   9.764          unsigned long signature = ~0UL;
   9.765          uint32_t tot_sz   = sizeof(struct vcpu_guest_context) + 8;
   9.766          uint32_t chunk_sz = sizeof(struct vcpu_guest_context);
   9.767          char chunk_sig[]  = "vcpu";
   9.768 -        if (!write_exact(io_fd, &signature, sizeof(signature)) ||
   9.769 -            !write_exact(io_fd, &tot_sz,    sizeof(tot_sz)) ||
   9.770 -            !write_exact(io_fd, &chunk_sig, 4) ||
   9.771 -            !write_exact(io_fd, &chunk_sz,  sizeof(chunk_sz)) ||
   9.772 -            !write_exact(io_fd, &ctxt,      sizeof(ctxt))) {
   9.773 +        if ( !write_exact(io_fd, &signature, sizeof(signature)) ||
   9.774 +             !write_exact(io_fd, &tot_sz,    sizeof(tot_sz)) ||
   9.775 +             !write_exact(io_fd, &chunk_sig, 4) ||
   9.776 +             !write_exact(io_fd, &chunk_sz,  sizeof(chunk_sz)) ||
   9.777 +             !write_exact(io_fd, &ctxt,      sizeof(ctxt)) )
   9.778 +        {
   9.779              ERROR("write: extended info");
   9.780              goto out;
   9.781          }
   9.782      }
   9.783  
   9.784 -    if (!write_exact(io_fd, p2m_frame_list, P2M_FL_SIZE)) {
   9.785 +    if ( !write_exact(io_fd, p2m_frame_list, P2M_FL_SIZE) )
   9.786 +    {
   9.787          ERROR("write: p2m_frame_list");
   9.788          goto out;
   9.789      }
   9.790 @@ -916,57 +929,65 @@ int xc_linux_save(int xc_handle, int io_
   9.791      print_stats(xc_handle, dom, 0, &stats, 0);
   9.792  
   9.793      /* Now write out each data page, canonicalising page tables as we go... */
   9.794 -
   9.795 -    while(1) {
   9.796 -
   9.797 +    for ( ; ; )
   9.798 +    {
   9.799          unsigned int prev_pc, sent_this_iter, N, batch;
   9.800  
   9.801          iter++;
   9.802          sent_this_iter = 0;
   9.803          skip_this_iter = 0;
   9.804          prev_pc = 0;
   9.805 -        N=0;
   9.806 +        N = 0;
   9.807  
   9.808          DPRINTF("Saving memory pages: iter %d   0%%", iter);
   9.809  
   9.810 -        while( N < p2m_size ){
   9.811 -
   9.812 +        while ( N < p2m_size )
   9.813 +        {
   9.814              unsigned int this_pc = (N * 100) / p2m_size;
   9.815 +            int rc;
   9.816  
   9.817 -            if ((this_pc - prev_pc) >= 5) {
   9.818 +            if ( (this_pc - prev_pc) >= 5 )
   9.819 +            {
   9.820                  DPRINTF("\b\b\b\b%3d%%", this_pc);
   9.821                  prev_pc = this_pc;
   9.822              }
   9.823  
   9.824 -            /* slightly wasteful to peek the whole array evey time,
   9.825 -               but this is fast enough for the moment. */
   9.826 -            if (!last_iter && xc_shadow_control(
   9.827 -                    xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK,
   9.828 -                    to_skip, p2m_size, NULL, 0, NULL) != p2m_size) {
   9.829 -                ERROR("Error peeking shadow bitmap");
   9.830 -                goto out;
   9.831 +            if ( !last_iter )
   9.832 +            {
    9.833 +                /* Slightly wasteful to peek the whole array every time,
   9.834 +                   but this is fast enough for the moment. */
   9.835 +                rc = xc_shadow_control(
   9.836 +                    xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK, to_skip, 
   9.837 +                    p2m_size, NULL, 0, NULL);
   9.838 +                if ( rc != p2m_size )
   9.839 +                {
   9.840 +                    ERROR("Error peeking shadow bitmap");
   9.841 +                    goto out;
   9.842 +                }
   9.843              }
   9.844  
   9.845 -
   9.846              /* load pfn_type[] with the mfn of all the pages we're doing in
   9.847                 this batch. */
   9.848 -            for (batch = 0; batch < MAX_BATCH_SIZE && N < p2m_size ; N++) {
   9.849 -
    9.850 +            for ( batch = 0;
   9.851 +                   (batch < MAX_BATCH_SIZE) && (N < p2m_size);
   9.852 +                   N++ )
   9.853 +            {
   9.854                  int n = permute(N, p2m_size, order_nr);
   9.855  
   9.856 -                if (debug) {
   9.857 +                if ( debug )
   9.858                      DPRINTF("%d pfn= %08lx mfn= %08lx %d  [mfn]= %08lx\n",
   9.859                              iter, (unsigned long)n, live_p2m[n],
   9.860                              test_bit(n, to_send),
   9.861                              mfn_to_pfn(live_p2m[n]&0xFFFFF));
   9.862 -                }
   9.863  
   9.864 -                if (!last_iter && test_bit(n, to_send)&& test_bit(n, to_skip))
   9.865 +                if ( !last_iter &&
   9.866 +                     test_bit(n, to_send) &&
   9.867 +                     test_bit(n, to_skip) )
   9.868                      skip_this_iter++; /* stats keeping */
   9.869  
   9.870 -                if (!((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
   9.871 -                      (test_bit(n, to_send) && last_iter) ||
   9.872 -                      (test_bit(n, to_fix)  && last_iter)))
   9.873 +                if ( !((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
   9.874 +                       (test_bit(n, to_send) && last_iter) ||
   9.875 +                       (test_bit(n, to_fix)  && last_iter)) )
   9.876                      continue;
   9.877  
   9.878                  /*
   9.879 @@ -979,20 +1000,22 @@ int xc_linux_save(int xc_handle, int io_
   9.880                  pfn_batch[batch] = n;
   9.881                  pfn_type[batch]  = live_p2m[n];
   9.882  
   9.883 -                if(!is_mapped(pfn_type[batch])) {
   9.884 -
   9.885 +                if ( !is_mapped(pfn_type[batch]) )
   9.886 +                {
   9.887                      /*
   9.888                      ** not currently in psuedo-physical map -- set bit
   9.889                      ** in to_fix since we must send this page in last_iter
   9.890                      ** unless its sent sooner anyhow, or it never enters
   9.891                      ** pseudo-physical map (e.g. for ballooned down domains)
   9.892                      */
   9.893 -
   9.894                      set_bit(n, to_fix);
   9.895                      continue;
   9.896                  }
   9.897  
   9.898 -                if(last_iter && test_bit(n, to_fix) && !test_bit(n, to_send)) {
   9.899 +                if ( last_iter &&
   9.900 +                     test_bit(n, to_fix) &&
   9.901 +                     !test_bit(n, to_send) )
   9.902 +                {
   9.903                      needed_to_fix++;
   9.904                      DPRINTF("Fix! iter %d, pfn %x. mfn %lx\n",
   9.905                              iter, n, pfn_type[batch]);
   9.906 @@ -1003,11 +1026,13 @@ int xc_linux_save(int xc_handle, int io_
   9.907                  batch++;
   9.908              }
   9.909  
   9.910 -            if (batch == 0)
   9.911 +            if ( batch == 0 )
   9.912                  goto skip; /* vanishingly unlikely... */
   9.913  
   9.914 -            if ((region_base = xc_map_foreign_batch(
   9.915 -                     xc_handle, dom, PROT_READ, pfn_type, batch)) == 0) {
   9.916 +            region_base = xc_map_foreign_batch(
   9.917 +                xc_handle, dom, PROT_READ, pfn_type, batch);
   9.918 +            if ( region_base == NULL )
   9.919 +            {
   9.920                  ERROR("map batch failed");
   9.921                  goto out;
   9.922              }
   9.923 @@ -1033,7 +1058,7 @@ int xc_linux_save(int xc_handle, int io_
   9.924                      continue;
   9.925                  }
   9.926  
   9.927 -                if (debug)
   9.928 +                if ( debug )
   9.929                      DPRINTF("%d pfn= %08lx mfn= %08lx [mfn]= %08lx"
   9.930                              " sum= %08lx\n",
   9.931                              iter,
   9.932 @@ -1049,13 +1074,15 @@ int xc_linux_save(int xc_handle, int io_
   9.933                      pfn_batch[j];
   9.934              }
   9.935  
   9.936 -            if(!write_exact(io_fd, &batch, sizeof(unsigned int))) {
   9.937 +            if ( !write_exact(io_fd, &batch, sizeof(unsigned int)) )
   9.938 +            {
   9.939                  ERROR("Error when writing to state file (2) (errno %d)",
   9.940                        errno);
   9.941                  goto out;
   9.942              }
   9.943  
   9.944 -            if(!write_exact(io_fd, pfn_type, sizeof(unsigned long)*j)) {
   9.945 +            if ( !write_exact(io_fd, pfn_type, sizeof(unsigned long)*j) )
   9.946 +            {
   9.947                  ERROR("Error when writing to state file (3) (errno %d)",
   9.948                        errno);
   9.949                  goto out;
   9.950 @@ -1083,22 +1110,26 @@ int xc_linux_save(int xc_handle, int io_
   9.951                      race = 
   9.952                          canonicalize_pagetable(pagetype, pfn, spage, page); 
   9.953  
   9.954 -                    if(race && !live) {
   9.955 +                    if ( race && !live )
   9.956 +                    {
   9.957                          ERROR("Fatal PT race (pfn %lx, type %08lx)", pfn,
   9.958                                pagetype);
   9.959                          goto out;
   9.960                      }
   9.961  
   9.962 -                    if (ratewrite(io_fd, live, page, PAGE_SIZE) != PAGE_SIZE) {
   9.963 +                    if ( ratewrite(io_fd, live, page, PAGE_SIZE) != PAGE_SIZE )
   9.964 +                    {
   9.965                          ERROR("Error when writing to state file (4)"
   9.966                                " (errno %d)", errno);
   9.967                          goto out;
   9.968                      }
   9.969 -
   9.970 -                }  else {
   9.971 -
   9.972 +                }
   9.973 +                else
   9.974 +                {
   9.975                      /* We have a normal page: just write it directly. */
   9.976 -                    if (ratewrite(io_fd, live, spage, PAGE_SIZE) != PAGE_SIZE) {
   9.977 +                    if ( ratewrite(io_fd, live, spage, PAGE_SIZE) !=
   9.978 +                         PAGE_SIZE )
   9.979 +                    {
   9.980                          ERROR("Error when writing to state file (5)"
   9.981                                " (errno %d)", errno);
   9.982                          goto out;
   9.983 @@ -1119,7 +1150,8 @@ int xc_linux_save(int xc_handle, int io_
   9.984          DPRINTF("\r %d: sent %d, skipped %d, ",
   9.985                  iter, sent_this_iter, skip_this_iter );
   9.986  
   9.987 -        if (last_iter) {
   9.988 +        if ( last_iter )
   9.989 +        {
   9.990              print_stats( xc_handle, dom, sent_this_iter, &stats, 1);
   9.991  
   9.992              DPRINTF("Total pages sent= %ld (%.2fx)\n",
   9.993 @@ -1127,14 +1159,16 @@ int xc_linux_save(int xc_handle, int io_
   9.994              DPRINTF("(of which %ld were fixups)\n", needed_to_fix  );
   9.995          }
   9.996  
   9.997 -        if (last_iter && debug) {
   9.998 +        if ( last_iter && debug )
   9.999 +        {
  9.1000              int minusone = -1;
  9.1001              memset(to_send, 0xff, BITMAP_SIZE);
  9.1002              debug = 0;
  9.1003              DPRINTF("Entering debug resend-all mode\n");
  9.1004  
  9.1005              /* send "-1" to put receiver into debug mode */
  9.1006 -            if(!write_exact(io_fd, &minusone, sizeof(int))) {
  9.1007 +            if ( !write_exact(io_fd, &minusone, sizeof(int)) )
  9.1008 +            {
  9.1009                  ERROR("Error when writing to state file (6) (errno %d)",
  9.1010                        errno);
  9.1011                  goto out;
  9.1012 @@ -1143,19 +1177,22 @@ int xc_linux_save(int xc_handle, int io_
  9.1013              continue;
  9.1014          }
  9.1015  
  9.1016 -        if (last_iter)
  9.1017 +        if ( last_iter )
  9.1018              break;
  9.1019  
  9.1020 -        if (live) {
  9.1021 -            if (((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) ||
  9.1022 -                (iter >= max_iters) ||
  9.1023 -                (sent_this_iter+skip_this_iter < 50) ||
  9.1024 -                (total_sent > p2m_size*max_factor)) {
  9.1025 +        if ( live )
  9.1026 +        {
  9.1027 +            if ( ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) ||
  9.1028 +                 (iter >= max_iters) ||
  9.1029 +                 (sent_this_iter+skip_this_iter < 50) ||
  9.1030 +                 (total_sent > p2m_size*max_factor) )
  9.1031 +            {
  9.1032                  DPRINTF("Start last iteration\n");
  9.1033                  last_iter = 1;
  9.1034  
  9.1035 -                if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info,
  9.1036 -                                      &ctxt)) {
  9.1037 +                if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info,
  9.1038 +                                       &ctxt) )
  9.1039 +                {
  9.1040                      ERROR("Domain appears not to have suspended");
  9.1041                      goto out;
  9.1042                  }
  9.1043 @@ -1166,9 +1203,10 @@ int xc_linux_save(int xc_handle, int io_
  9.1044                          (unsigned long)ctxt.user_regs.edx);
  9.1045              }
  9.1046  
  9.1047 -            if (xc_shadow_control(xc_handle, dom, 
  9.1048 -                                  XEN_DOMCTL_SHADOW_OP_CLEAN, to_send, 
  9.1049 -                                  p2m_size, NULL, 0, &stats) != p2m_size) {
  9.1050 +            if ( xc_shadow_control(xc_handle, dom, 
  9.1051 +                                   XEN_DOMCTL_SHADOW_OP_CLEAN, to_send, 
  9.1052 +                                   p2m_size, NULL, 0, &stats) != p2m_size )
  9.1053 +            {
  9.1054                  ERROR("Error flushing shadow PT");
  9.1055                  goto out;
  9.1056              }
  9.1057 @@ -1178,7 +1216,7 @@ int xc_linux_save(int xc_handle, int io_
  9.1058              print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
  9.1059  
  9.1060          }
  9.1061 -    } /* end of while 1 */
  9.1062 +    } /* end of infinite for loop */
  9.1063  
  9.1064      DPRINTF("All memory is saved\n");
  9.1065  
  9.1066 @@ -1189,20 +1227,23 @@ int xc_linux_save(int xc_handle, int io_
  9.1067              uint64_t vcpumap;
  9.1068          } chunk = { -2, info.max_vcpu_id };
  9.1069  
  9.1070 -        if (info.max_vcpu_id >= 64) {
  9.1071 +        if ( info.max_vcpu_id >= 64 )
  9.1072 +        {
  9.1073              ERROR("Too many VCPUS in guest!");
  9.1074              goto out;
  9.1075          }
  9.1076  
  9.1077 -        for (i = 1; i <= info.max_vcpu_id; i++) {
  9.1078 +        for ( i = 1; i <= info.max_vcpu_id; i++ )
  9.1079 +        {
  9.1080              xc_vcpuinfo_t vinfo;
  9.1081 -            if ((xc_vcpu_getinfo(xc_handle, dom, i, &vinfo) == 0) &&
  9.1082 -                vinfo.online)
  9.1083 +            if ( (xc_vcpu_getinfo(xc_handle, dom, i, &vinfo) == 0) &&
  9.1084 +                 vinfo.online )
  9.1085                  vcpumap |= 1ULL << i;
  9.1086          }
  9.1087  
  9.1088          chunk.vcpumap = vcpumap;
  9.1089 -        if(!write_exact(io_fd, &chunk, sizeof(chunk))) {
  9.1090 +        if ( !write_exact(io_fd, &chunk, sizeof(chunk)) )
  9.1091 +        {
  9.1092              ERROR("Error when writing to state file (errno %d)", errno);
  9.1093              goto out;
  9.1094          }
  9.1095 @@ -1210,7 +1251,8 @@ int xc_linux_save(int xc_handle, int io_
  9.1096  
  9.1097      /* Zero terminate */
  9.1098      i = 0;
  9.1099 -    if (!write_exact(io_fd, &i, sizeof(int))) {
  9.1100 +    if ( !write_exact(io_fd, &i, sizeof(int)) )
  9.1101 +    {
  9.1102          ERROR("Error when writing to state file (6') (errno %d)", errno);
  9.1103          goto out;
  9.1104      }
  9.1105 @@ -1220,24 +1262,28 @@ int xc_linux_save(int xc_handle, int io_
  9.1106          unsigned int i,j;
  9.1107          unsigned long pfntab[1024];
  9.1108  
  9.1109 -        for (i = 0, j = 0; i < p2m_size; i++) {
  9.1110 -            if (!is_mapped(live_p2m[i]))
  9.1111 +        for ( i = 0, j = 0; i < p2m_size; i++ )
  9.1112 +        {
  9.1113 +            if ( !is_mapped(live_p2m[i]) )
  9.1114                  j++;
  9.1115          }
  9.1116  
  9.1117 -        if(!write_exact(io_fd, &j, sizeof(unsigned int))) {
  9.1118 +        if ( !write_exact(io_fd, &j, sizeof(unsigned int)) )
  9.1119 +        {
  9.1120              ERROR("Error when writing to state file (6a) (errno %d)", errno);
  9.1121              goto out;
  9.1122          }
  9.1123  
  9.1124 -        for (i = 0, j = 0; i < p2m_size; ) {
  9.1125 -
  9.1126 -            if (!is_mapped(live_p2m[i]))
  9.1127 +        for ( i = 0, j = 0; i < p2m_size; )
  9.1128 +        {
  9.1129 +            if ( !is_mapped(live_p2m[i]) )
  9.1130                  pfntab[j++] = i;
  9.1131  
  9.1132              i++;
  9.1133 -            if (j == 1024 || i == p2m_size) {
  9.1134 -                if(!write_exact(io_fd, &pfntab, sizeof(unsigned long)*j)) {
  9.1135 +            if ( (j == 1024) || (i == p2m_size) )
  9.1136 +            {
  9.1137 +                if ( !write_exact(io_fd, &pfntab, sizeof(unsigned long)*j) )
  9.1138 +                {
  9.1139                      ERROR("Error when writing to state file (6b) (errno %d)",
  9.1140                            errno);
  9.1141                      goto out;
  9.1142 @@ -1245,34 +1291,39 @@ int xc_linux_save(int xc_handle, int io_
  9.1143                  j = 0;
  9.1144              }
  9.1145          }
  9.1146 -
  9.1147      }
  9.1148  
  9.1149      /* Canonicalise the suspend-record frame number. */
  9.1150 -    if ( !translate_mfn_to_pfn(&ctxt.user_regs.edx) ){
  9.1151 +    if ( !translate_mfn_to_pfn(&ctxt.user_regs.edx) )
  9.1152 +    {
  9.1153          ERROR("Suspend record is not in range of pseudophys map");
  9.1154          goto out;
  9.1155      }
  9.1156  
  9.1157 -    for (i = 0; i <= info.max_vcpu_id; i++) {
  9.1158 -        if (!(vcpumap & (1ULL << i)))
  9.1159 +    for ( i = 0; i <= info.max_vcpu_id; i++ )
  9.1160 +    {
  9.1161 +        if ( !(vcpumap & (1ULL << i)) )
  9.1162              continue;
  9.1163  
  9.1164 -        if ((i != 0) && xc_vcpu_getcontext(xc_handle, dom, i, &ctxt)) {
  9.1165 +        if ( (i != 0) && xc_vcpu_getcontext(xc_handle, dom, i, &ctxt) )
  9.1166 +        {
  9.1167              ERROR("No context for VCPU%d", i);
  9.1168              goto out;
  9.1169          }
  9.1170  
  9.1171          /* Canonicalise each GDT frame number. */
  9.1172 -        for ( j = 0; (512*j) < ctxt.gdt_ents; j++ ) {
  9.1173 -            if ( !translate_mfn_to_pfn(&ctxt.gdt_frames[j]) ) {
  9.1174 +        for ( j = 0; (512*j) < ctxt.gdt_ents; j++ )
  9.1175 +        {
  9.1176 +            if ( !translate_mfn_to_pfn(&ctxt.gdt_frames[j]) )
  9.1177 +            {
  9.1178                  ERROR("GDT frame is not in range of pseudophys map");
  9.1179                  goto out;
  9.1180              }
  9.1181          }
  9.1182  
  9.1183          /* Canonicalise the page table base pointer. */
  9.1184 -        if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[3])) ) {
  9.1185 +        if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[3])) )
  9.1186 +        {
  9.1187              ERROR("PT base is not in range of pseudophys map");
  9.1188              goto out;
  9.1189          }
  9.1190 @@ -1282,7 +1333,8 @@ int xc_linux_save(int xc_handle, int io_
  9.1191          /* Guest pagetable (x86/64) stored in otherwise-unused CR1. */
  9.1192          if ( (pt_levels == 4) && ctxt.ctrlreg[1] )
  9.1193          {
  9.1194 -            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[1])) ) {
  9.1195 +            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[1])) )
  9.1196 +            {
  9.1197                  ERROR("PT base is not in range of pseudophys map");
  9.1198                  goto out;
  9.1199              }
  9.1200 @@ -1291,7 +1343,8 @@ int xc_linux_save(int xc_handle, int io_
  9.1201                  xen_pfn_to_cr3(mfn_to_pfn(xen_cr3_to_pfn(ctxt.ctrlreg[1])));
  9.1202          }
  9.1203  
  9.1204 -        if (!write_exact(io_fd, &ctxt, sizeof(ctxt))) {
  9.1205 +        if ( !write_exact(io_fd, &ctxt, sizeof(ctxt)) )
  9.1206 +        {
  9.1207              ERROR("Error when writing to state file (1) (errno %d)", errno);
  9.1208              goto out;
  9.1209          }
  9.1210 @@ -1302,7 +1355,8 @@ int xc_linux_save(int xc_handle, int io_
  9.1211       */
  9.1212      memcpy(page, live_shinfo, PAGE_SIZE);
  9.1213      ((shared_info_t *)page)->arch.pfn_to_mfn_frame_list_list = 0;
  9.1214 -    if (!write_exact(io_fd, page, PAGE_SIZE)) {
  9.1215 +    if ( !write_exact(io_fd, page, PAGE_SIZE) )
  9.1216 +    {
  9.1217          ERROR("Error when writing to state file (1) (errno %d)", errno);
  9.1218          goto out;
  9.1219      }
  9.1220 @@ -1312,30 +1366,30 @@ int xc_linux_save(int xc_handle, int io_
  9.1221  
  9.1222   out:
  9.1223  
  9.1224 -    if (live) {
  9.1225 -        if(xc_shadow_control(xc_handle, dom, 
  9.1226 -                             XEN_DOMCTL_SHADOW_OP_OFF,
  9.1227 -                             NULL, 0, NULL, 0, NULL) < 0) {
  9.1228 +    if ( live )
  9.1229 +    {
  9.1230 +        if ( xc_shadow_control(xc_handle, dom, 
  9.1231 +                               XEN_DOMCTL_SHADOW_OP_OFF,
  9.1232 +                               NULL, 0, NULL, 0, NULL) < 0 )
  9.1233              DPRINTF("Warning - couldn't disable shadow mode");
  9.1234 -        }
  9.1235      }
  9.1236  
  9.1237 -    // flush last write and discard cache for file
  9.1238 +    /* Flush last write and discard cache for file. */
  9.1239      discard_file_cache(io_fd, 1 /* flush */);
  9.1240  
  9.1241 -    if (live_shinfo)
  9.1242 +    if ( live_shinfo )
  9.1243          munmap(live_shinfo, PAGE_SIZE);
  9.1244  
  9.1245 -    if (live_p2m_frame_list_list)
  9.1246 +    if ( live_p2m_frame_list_list )
  9.1247          munmap(live_p2m_frame_list_list, PAGE_SIZE);
  9.1248  
  9.1249 -    if (live_p2m_frame_list)
  9.1250 +    if ( live_p2m_frame_list )
  9.1251          munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
  9.1252  
  9.1253 -    if (live_p2m)
  9.1254 +    if ( live_p2m )
  9.1255          munmap(live_p2m, ROUNDUP(p2m_size * sizeof(xen_pfn_t), PAGE_SHIFT));
  9.1256  
  9.1257 -    if (live_m2p)
  9.1258 +    if ( live_m2p )
  9.1259          munmap(live_m2p, M2P_SIZE(max_mfn));
  9.1260  
  9.1261      free(pfn_type);
    10.1 --- a/tools/libxc/xc_private.c	Mon Apr 09 13:39:35 2007 -0600
    10.2 +++ b/tools/libxc/xc_private.c	Mon Apr 09 13:40:25 2007 -0600
    10.3 @@ -145,7 +145,7 @@ int xc_mmuext_op(
    10.4      return ret;
    10.5  }
    10.6  
    10.7 -static int flush_mmu_updates(int xc_handle, xc_mmu_t *mmu)
    10.8 +static int flush_mmu_updates(int xc_handle, struct xc_mmu *mmu)
    10.9  {
   10.10      int err = 0;
   10.11      DECLARE_HYPERCALL;
   10.12 @@ -180,9 +180,9 @@ static int flush_mmu_updates(int xc_hand
   10.13      return err;
   10.14  }
   10.15  
   10.16 -xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom)
   10.17 +struct xc_mmu *xc_alloc_mmu_updates(int xc_handle, domid_t dom)
   10.18  {
   10.19 -    xc_mmu_t *mmu = malloc(sizeof(xc_mmu_t));
   10.20 +    struct xc_mmu *mmu = malloc(sizeof(*mmu));
   10.21      if ( mmu == NULL )
   10.22          return mmu;
   10.23      mmu->idx     = 0;
   10.24 @@ -190,7 +190,7 @@ xc_mmu_t *xc_init_mmu_updates(int xc_han
   10.25      return mmu;
   10.26  }
   10.27  
   10.28 -int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
   10.29 +int xc_add_mmu_update(int xc_handle, struct xc_mmu *mmu,
   10.30                        unsigned long long ptr, unsigned long long val)
   10.31  {
   10.32      mmu->updates[mmu->idx].ptr = ptr;
   10.33 @@ -202,7 +202,7 @@ int xc_add_mmu_update(int xc_handle, xc_
   10.34      return 0;
   10.35  }
   10.36  
   10.37 -int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu)
   10.38 +int xc_flush_mmu_updates(int xc_handle, struct xc_mmu *mmu)
   10.39  {
   10.40      return flush_mmu_updates(xc_handle, mmu);
   10.41  }
    11.1 --- a/tools/libxc/xc_private.h	Mon Apr 09 13:39:35 2007 -0600
    11.2 +++ b/tools/libxc/xc_private.h	Mon Apr 09 13:40:25 2007 -0600
    11.3 @@ -168,4 +168,16 @@ void bitmap_byte_to_64(uint64_t *lp, con
    11.4  /* Optionally flush file to disk and discard page cache */
    11.5  void discard_file_cache(int fd, int flush);
    11.6  
    11.7 +#define MAX_MMU_UPDATES 1024
    11.8 +struct xc_mmu {
    11.9 +    mmu_update_t updates[MAX_MMU_UPDATES];
   11.10 +    int          idx;
   11.11 +    domid_t      subject;
   11.12 +};
   11.13 +/* Structure returned by xc_alloc_mmu_updates must be free()'ed by caller. */
   11.14 +struct xc_mmu *xc_alloc_mmu_updates(int xc_handle, domid_t dom);
   11.15 +int xc_add_mmu_update(int xc_handle, struct xc_mmu *mmu,
   11.16 +                   unsigned long long ptr, unsigned long long val);
   11.17 +int xc_flush_mmu_updates(int xc_handle, struct xc_mmu *mmu);
   11.18 +
   11.19  #endif /* __XC_PRIVATE_H__ */
    12.1 --- a/tools/libxc/xenctrl.h	Mon Apr 09 13:39:35 2007 -0600
    12.2 +++ b/tools/libxc/xenctrl.h	Mon Apr 09 13:40:25 2007 -0600
    12.3 @@ -666,21 +666,6 @@ int xc_sysctl(int xc_handle, struct xen_
    12.4  
    12.5  int xc_version(int xc_handle, int cmd, void *arg);
    12.6  
    12.7 -/*
    12.8 - * MMU updates.
    12.9 - */
   12.10 -#define MAX_MMU_UPDATES 1024
   12.11 -struct xc_mmu {
   12.12 -    mmu_update_t updates[MAX_MMU_UPDATES];
   12.13 -    int          idx;
   12.14 -    domid_t      subject;
   12.15 -};
   12.16 -typedef struct xc_mmu xc_mmu_t;
   12.17 -xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
   12.18 -int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
   12.19 -                   unsigned long long ptr, unsigned long long val);
   12.20 -int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
   12.21 -
   12.22  int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);
   12.23  
   12.24  /*
    13.1 --- a/unmodified_drivers/linux-2.6/compat-include/xen/platform-compat.h	Mon Apr 09 13:39:35 2007 -0600
    13.2 +++ b/unmodified_drivers/linux-2.6/compat-include/xen/platform-compat.h	Mon Apr 09 13:40:25 2007 -0600
    13.3 @@ -96,6 +96,10 @@ extern char *kasprintf(gfp_t gfp, const 
    13.4  #define handle_sysrq(x,y,z) handle_sysrq(x,y)
    13.5  #endif
    13.6  
    13.7 +#if defined(_PAGE_PRESENT) && !defined(_PAGE_NX)
    13.8 +#define _PAGE_NX 0
    13.9 +#endif
   13.10 +
   13.11  /*
   13.12   * This variable at present is referenced by netfront, but only in code that
   13.13   * is dead when running in hvm guests. To detect potential active uses of it
    14.1 --- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c	Mon Apr 09 13:39:35 2007 -0600
    14.2 +++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c	Mon Apr 09 13:40:25 2007 -0600
    14.3 @@ -1,6 +1,6 @@
    14.4  /******************************************************************************
    14.5 - * evtchn-pci.c
    14.6 - * xen event channel fake PCI device driver
    14.7 + * platform-pci.c
    14.8 + * Xen platform PCI device driver
    14.9   * Copyright (C) 2005, Intel Corporation.
   14.10   *
   14.11   * This program is free software; you can redistribute it and/or modify it
    15.1 --- a/xen/arch/x86/mm/shadow/common.c	Mon Apr 09 13:39:35 2007 -0600
    15.2 +++ b/xen/arch/x86/mm/shadow/common.c	Mon Apr 09 13:40:25 2007 -0600
    15.3 @@ -1245,9 +1245,6 @@ static unsigned int sh_set_allocation(st
    15.4              list_del(&sp->list);
    15.5              d->arch.paging.shadow.free_pages -= 1<<SHADOW_MAX_ORDER;
    15.6              d->arch.paging.shadow.total_pages -= 1<<SHADOW_MAX_ORDER;
    15.7 -            for ( j = 0; j < 1<<SHADOW_MAX_ORDER; j++ ) 
    15.8 -                /* Keep the page allocator happy */
    15.9 -                ((struct page_info *)sp)[j].count_info = 0;
   15.10              free_domheap_pages((struct page_info *)sp, SHADOW_MAX_ORDER);
   15.11          }
   15.12  
    16.1 --- a/xen/arch/x86/traps.c	Mon Apr 09 13:39:35 2007 -0600
    16.2 +++ b/xen/arch/x86/traps.c	Mon Apr 09 13:40:25 2007 -0600
    16.3 @@ -270,21 +270,6 @@ void show_stack(struct cpu_user_regs *re
    16.4      show_trace(regs);
    16.5  }
    16.6  
    16.7 -void show_xen_trace()
    16.8 -{
    16.9 -    struct cpu_user_regs regs;
   16.10 -#ifdef __x86_64
   16.11 -    __asm__("movq %%rsp,%0" : "=m" (regs.rsp));
   16.12 -    __asm__("movq %%rbp,%0" : "=m" (regs.rbp));
   16.13 -    __asm__("leaq 0(%%rip),%0" : "=a" (regs.rip));
   16.14 -#else
   16.15 -    __asm__("movl %%esp,%0" : "=m" (regs.esp));
   16.16 -    __asm__("movl %%ebp,%0" : "=m" (regs.ebp));
   16.17 -    __asm__("call 1f; 1: popl %0" : "=a" (regs.eip));
   16.18 -#endif
   16.19 -    show_trace(&regs);
   16.20 -}
   16.21 -
   16.22  void show_stack_overflow(unsigned int cpu, unsigned long esp)
   16.23  {
   16.24  #ifdef MEMORY_GUARD
    17.1 --- a/xen/arch/x86/x86_32/entry.S	Mon Apr 09 13:39:35 2007 -0600
    17.2 +++ b/xen/arch/x86/x86_32/entry.S	Mon Apr 09 13:40:25 2007 -0600
    17.3 @@ -283,6 +283,7 @@ bad_hypercall:
    17.4  /* %edx == trap_bounce, %ebx == struct vcpu                       */
    17.5  /* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
    17.6  create_bounce_frame:
    17.7 +        ASSERT_INTERRUPTS_ENABLED
    17.8          movl UREGS_eflags+4(%esp),%ecx
    17.9          movb UREGS_cs+4(%esp),%cl
   17.10          testl $(2|X86_EFLAGS_VM),%ecx
    18.1 --- a/xen/arch/x86/x86_64/compat/entry.S	Mon Apr 09 13:39:35 2007 -0600
    18.2 +++ b/xen/arch/x86/x86_64/compat/entry.S	Mon Apr 09 13:40:25 2007 -0600
    18.3 @@ -137,6 +137,7 @@ compat_bad_hypercall:
    18.4  
    18.5  /* %rbx: struct vcpu, interrupts disabled */
    18.6  compat_restore_all_guest:
    18.7 +        ASSERT_INTERRUPTS_DISABLED
    18.8          RESTORE_ALL
    18.9          addq  $8,%rsp
   18.10  .Lft0:  iretq
   18.11 @@ -188,13 +189,14 @@ ENTRY(compat_post_handle_exception)
   18.12  
   18.13  ENTRY(compat_int80_direct_trap)
   18.14          call  compat_create_bounce_frame
   18.15 -        jmp   compat_restore_all_guest
   18.16 +        jmp   compat_test_all_events
   18.17  
   18.18  /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
   18.19  /*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
   18.20  /* %rdx: trap_bounce, %rbx: struct vcpu                                  */
   18.21  /* On return only %rbx is guaranteed non-clobbered.                      */
   18.22  compat_create_bounce_frame:
   18.23 +        ASSERT_INTERRUPTS_ENABLED
   18.24          mov   %fs,%edi
   18.25          testb $2,UREGS_cs+8(%rsp)
   18.26          jz    1f
    19.1 --- a/xen/arch/x86/x86_64/entry.S	Mon Apr 09 13:39:35 2007 -0600
    19.2 +++ b/xen/arch/x86/x86_64/entry.S	Mon Apr 09 13:40:25 2007 -0600
    19.3 @@ -38,6 +38,7 @@ 1:      call  create_bounce_frame
    19.4  
    19.5  /* %rbx: struct vcpu, interrupts disabled */
    19.6  restore_all_guest:
    19.7 +        ASSERT_INTERRUPTS_DISABLED
    19.8          RESTORE_ALL
    19.9          testw $TRAP_syscall,4(%rsp)
   19.10          jz    iret_exit_to_guest
   19.11 @@ -230,7 +231,7 @@ ENTRY(int80_direct_trap)
   19.12  
   19.13          /* Check that the callback is non-null. */
   19.14          leaq  VCPU_int80_bounce(%rbx),%rdx
   19.15 -        cmp   $0, TRAPBOUNCE_flags(%rdx)
   19.16 +        cmp   $0,TRAPBOUNCE_flags(%rdx)
   19.17          jz    int80_slow_path
   19.18  
   19.19          movq  VCPU_domain(%rbx),%rax
   19.20 @@ -238,7 +239,7 @@ ENTRY(int80_direct_trap)
   19.21          jnz   compat_int80_direct_trap
   19.22  
   19.23          call  create_bounce_frame
   19.24 -        jmp   restore_all_guest
   19.25 +        jmp   test_all_events
   19.26  
   19.27  int80_slow_path:
   19.28          /* 
   19.29 @@ -256,6 +257,7 @@ int80_slow_path:
   19.30  /* %rdx: trap_bounce, %rbx: struct vcpu                           */
   19.31  /* On return only %rbx is guaranteed non-clobbered.                      */
   19.32  create_bounce_frame:
   19.33 +        ASSERT_INTERRUPTS_ENABLED
   19.34          testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
   19.35          jnz   1f
   19.36          /* Push new frame at registered guest-OS stack base. */
    20.1 --- a/xen/arch/x86/x86_64/traps.c	Mon Apr 09 13:39:35 2007 -0600
    20.2 +++ b/xen/arch/x86/x86_64/traps.c	Mon Apr 09 13:40:25 2007 -0600
    20.3 @@ -170,7 +170,8 @@ asmlinkage void do_double_fault(struct c
    20.4             regs->r9,  regs->r10, regs->r11);
    20.5      printk("r12: %016lx   r13: %016lx   r14: %016lx\n",
    20.6             regs->r12, regs->r13, regs->r14);
    20.7 -    printk("r15: %016lx\n", regs->r15);
    20.8 +    printk("r15: %016lx    cs: %016lx    ss: %016lx\n",
    20.9 +           regs->r15, (long)regs->cs, (long)regs->ss);
   20.10      show_stack_overflow(cpu, regs->rsp);
   20.11  
   20.12      panic("DOUBLE FAULT -- system shutdown\n");
   20.13 @@ -260,11 +261,14 @@ void __init percpu_traps_init(void)
   20.14          idt_table[TRAP_double_fault].a |= 1UL << 32; /* IST1 */
   20.15          idt_table[TRAP_nmi].a          |= 2UL << 32; /* IST2 */
   20.16  
   20.17 -#ifdef CONFIG_COMPAT
   20.18 -        /* The hypercall entry vector is only accessible from ring 1. */
   20.19 +        /*
   20.20 +         * The 32-on-64 hypercall entry vector is only accessible from ring 1.
   20.21 +         * Also note that this is a trap gate, not an interrupt gate.
   20.22 +         */
   20.23          _set_gate(idt_table+HYPERCALL_VECTOR, 15, 1, &compat_hypercall);
   20.24 +
   20.25 +        /* Fast trap for int80 (faster than taking the #GP-fixup path). */
   20.26          _set_gate(idt_table+0x80, 15, 3, &int80_direct_trap);
   20.27 -#endif
   20.28      }
   20.29  
   20.30      stack_bottom = (char *)get_stack_bottom();
    21.1 --- a/xen/arch/x86/x86_emulate.c	Mon Apr 09 13:39:35 2007 -0600
    21.2 +++ b/xen/arch/x86/x86_emulate.c	Mon Apr 09 13:40:25 2007 -0600
    21.3 @@ -2413,7 +2413,7 @@ x86_emulate(
    21.4      goto writeback;
    21.5  
    21.6   cannot_emulate:
    21.7 -#ifdef __XEN__
    21.8 +#if 0
    21.9      gdprintk(XENLOG_DEBUG, "Instr:");
   21.10      for ( ea.mem.off = ctxt->regs->eip; ea.mem.off < _regs.eip; ea.mem.off++ )
   21.11      {
    22.1 --- a/xen/common/page_alloc.c	Mon Apr 09 13:39:35 2007 -0600
    22.2 +++ b/xen/common/page_alloc.c	Mon Apr 09 13:40:25 2007 -0600
    22.3 @@ -445,7 +445,19 @@ static void free_heap_pages(
    22.4  
    22.5      for ( i = 0; i < (1 << order); i++ )
    22.6      {
    22.7 -        BUG_ON(pg[i].count_info != 0);
    22.8 +        /*
    22.9 +         * Cannot assume that count_info == 0, as there are some corner cases
   22.10 +         * where it isn't the case and yet it isn't a bug:
   22.11 +         *  1. page_get_owner() is NULL
   22.12 +         *  2. page_get_owner() is a domain that was never accessible by
   22.13 +         *     its domid (e.g., failed to fully construct the domain).
   22.14 +         *  3. page was never addressable by the guest (e.g., it's an
   22.15 +         *     auto-translate-physmap guest and the page was never included
   22.16 +         *     in its pseudophysical address space).
   22.17 +         * In all the above cases there can be no guest mappings of this page.
   22.18 +         */
   22.19 +        pg[i].count_info = 0;
   22.20 +
   22.21          if ( (d = page_get_owner(&pg[i])) != NULL )
   22.22          {
   22.23              pg[i].tlbflush_timestamp = tlbflush_current_time();
    23.1 --- a/xen/include/asm-x86/desc.h	Mon Apr 09 13:39:35 2007 -0600
    23.2 +++ b/xen/include/asm-x86/desc.h	Mon Apr 09 13:40:25 2007 -0600
    23.3 @@ -106,7 +106,7 @@
    23.4      ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */        \
    23.5       ((sel) == (!IS_COMPAT(d) ?                                         \
    23.6                  FLAT_KERNEL_CS :                /* Xen default seg? */  \
    23.7 -                FLAT_COMPAT_KERNEL_CS)) ||      /* Xen default compat seg? */  \
    23.8 +                FLAT_COMPAT_KERNEL_CS)) ||                              \
    23.9       ((sel) & 4))                               /* LDT seg? */
   23.10  
   23.11  #endif /* __ASSEMBLY__ */
    24.1 --- a/xen/include/asm-x86/processor.h	Mon Apr 09 13:39:35 2007 -0600
    24.2 +++ b/xen/include/asm-x86/processor.h	Mon Apr 09 13:40:25 2007 -0600
    24.3 @@ -552,7 +552,6 @@ extern always_inline void prefetchw(cons
    24.4  #endif
    24.5  
    24.6  void show_stack(struct cpu_user_regs *regs);
    24.7 -void show_xen_trace(void);
    24.8  void show_stack_overflow(unsigned int cpu, unsigned long esp);
    24.9  void show_registers(struct cpu_user_regs *regs);
   24.10  void show_execution_state(struct cpu_user_regs *regs);
    25.1 --- a/xen/include/asm-x86/x86_32/asm_defns.h	Mon Apr 09 13:39:35 2007 -0600
    25.2 +++ b/xen/include/asm-x86/x86_32/asm_defns.h	Mon Apr 09 13:40:25 2007 -0600
    25.3 @@ -8,10 +8,20 @@
    25.4  #define SETUP_EXCEPTION_FRAME_POINTER           \
    25.5          movl  %esp,%ebp;                        \
    25.6          notl  %ebp
    25.7 +#define ASSERT_INTERRUPT_STATUS(x)              \
    25.8 +        pushf;                                  \
    25.9 +        testb $X86_EFLAGS_IF>>8,1(%esp);        \
   25.10 +        j##x  1f;                               \
   25.11 +        ud2a;                                   \
   25.12 +1:      addl  $4,%esp;
   25.13  #else
   25.14  #define SETUP_EXCEPTION_FRAME_POINTER
   25.15 +#define ASSERT_INTERRUPT_STATUS(x)
   25.16  #endif
   25.17  
   25.18 +#define ASSERT_INTERRUPTS_ENABLED  ASSERT_INTERRUPT_STATUS(nz)
   25.19 +#define ASSERT_INTERRUPTS_DISABLED ASSERT_INTERRUPT_STATUS(z)
   25.20 +
   25.21  #define __SAVE_ALL_PRE                                  \
   25.22          cld;                                            \
   25.23          pushl %eax;                                     \
    26.1 --- a/xen/include/asm-x86/x86_64/asm_defns.h	Mon Apr 09 13:39:35 2007 -0600
    26.2 +++ b/xen/include/asm-x86/x86_64/asm_defns.h	Mon Apr 09 13:40:25 2007 -0600
    26.3 @@ -8,10 +8,20 @@
    26.4  #define SETUP_EXCEPTION_FRAME_POINTER           \
    26.5          movq  %rsp,%rbp;                        \
    26.6          notq  %rbp
    26.7 +#define ASSERT_INTERRUPT_STATUS(x)              \
    26.8 +        pushf;                                  \
    26.9 +        testb $X86_EFLAGS_IF>>8,1(%rsp);        \
   26.10 +        j##x  1f;                               \
   26.11 +        ud2a;                                   \
   26.12 +1:      addq  $8,%rsp;
   26.13  #else
   26.14  #define SETUP_EXCEPTION_FRAME_POINTER
   26.15 +#define ASSERT_INTERRUPT_STATUS(x)
   26.16  #endif
   26.17  
   26.18 +#define ASSERT_INTERRUPTS_ENABLED  ASSERT_INTERRUPT_STATUS(nz)
   26.19 +#define ASSERT_INTERRUPTS_DISABLED ASSERT_INTERRUPT_STATUS(z)
   26.20 +
   26.21  #define SAVE_ALL                                \
   26.22          cld;                                    \
   26.23          pushq %rdi;                             \
    27.1 --- a/xen/include/public/domctl.h	Mon Apr 09 13:39:35 2007 -0600
    27.2 +++ b/xen/include/public/domctl.h	Mon Apr 09 13:40:25 2007 -0600
    27.3 @@ -123,15 +123,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getme
    27.4  #define XEN_DOMCTL_getpageframeinfo   7
    27.5  
    27.6  #define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
    27.7 -#define XEN_DOMCTL_PFINFO_NOTAB   (0x0<<28)
    27.8 -#define XEN_DOMCTL_PFINFO_L1TAB   (0x1<<28)
    27.9 -#define XEN_DOMCTL_PFINFO_L2TAB   (0x2<<28)
   27.10 -#define XEN_DOMCTL_PFINFO_L3TAB   (0x3<<28)
   27.11 -#define XEN_DOMCTL_PFINFO_L4TAB   (0x4<<28)
   27.12 -#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7<<28)
   27.13 -#define XEN_DOMCTL_PFINFO_LPINTAB (0x1<<31)
   27.14 -#define XEN_DOMCTL_PFINFO_XTAB    (0xf<<28) /* invalid page */
   27.15 -#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xf<<28)
   27.16 +#define XEN_DOMCTL_PFINFO_NOTAB   (0x0U<<28)
   27.17 +#define XEN_DOMCTL_PFINFO_L1TAB   (0x1U<<28)
   27.18 +#define XEN_DOMCTL_PFINFO_L2TAB   (0x2U<<28)
   27.19 +#define XEN_DOMCTL_PFINFO_L3TAB   (0x3U<<28)
   27.20 +#define XEN_DOMCTL_PFINFO_L4TAB   (0x4U<<28)
   27.21 +#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
   27.22 +#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
   27.23 +#define XEN_DOMCTL_PFINFO_XTAB    (0xfU<<28) /* invalid page */
   27.24 +#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
   27.25  
   27.26  struct xen_domctl_getpageframeinfo {
   27.27      /* IN variables. */