ia64/xen-unstable

changeset 4306:016ca3ca12ab

bitkeeper revision 1.1261.1.1 (424338b72O4irc499IhwoNUikHMy5Q)

Minor cleanups, in preparation for merging with mainline.
Removal of some debug hooks.

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author mafetter@fleming.research
date Thu Mar 24 22:01:27 2005 +0000 (2005-03-24)
parents 13032fd25c06
children b2a62a1f2f20
files xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/domain_page.c xen/common/page_alloc.c xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h xen/include/asm-x86/x86_32/domain_page.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Thu Mar 24 16:48:36 2005 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Thu Mar 24 22:01:27 2005 +0000
     1.3 @@ -238,7 +238,7 @@ void arch_do_createdomain(struct exec_do
     1.4          d->shared_info = (void *)alloc_xenheap_page();
     1.5          memset(d->shared_info, 0, PAGE_SIZE);
     1.6          ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
     1.7 -        SHARE_PFN_WITH_DOMAIN2(virt_to_page(d->shared_info), d);
     1.8 +        SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
     1.9          machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 
    1.10                                 PAGE_SHIFT] = INVALID_M2P_ENTRY;
    1.11  
    1.12 @@ -262,7 +262,7 @@ void arch_do_createdomain(struct exec_do
    1.13              mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
    1.14  #endif
    1.15  
    1.16 -        shadow_lock_init(d);
    1.17 +        shadow_lock_init(d);        
    1.18          INIT_LIST_HEAD(&d->arch.free_shadow_frames);
    1.19      }
    1.20  }
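
Note that this hunk is a functional change rather than a pure cleanup: the shared_info frame is now registered with SHARE_PFN_WITH_DOMAIN instead of the DOMAIN2 variant, and (per the macro tail that survives in the mm.h hunk below) that places the frame on the domain's xenpage_list rather than page_list. Combined with the new xenpage_list walk added to alloc_p2m_table() in shadow.c, this presumably lets such Xen-heap frames show up in a translated guest's p2m table. A reconstruction of the surviving macro, offered only as a sketch inferred from the deleted DOMAIN2 body; the exact count_info value is an assumption:

    /* Sketch of SHARE_PFN_WITH_DOMAIN(), reconstructed from the deleted
     * DOMAIN2 body and the macro tail visible in xen/include/asm-x86/mm.h
     * below.  The count_info arithmetic is an assumption of this sketch. */
    #define SHARE_PFN_WITH_DOMAIN_SKETCH(_pfn, _dom)                            \
        do {                                                                    \
            page_set_owner((_pfn), (_dom));                                     \
            /* One type ref, pre-validated: pins the frame 'writable'. */       \
            (_pfn)->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;  \
            wmb(); /* install valid domain ptr before updating refcnt. */       \
            spin_lock(&(_dom)->page_alloc_lock);                                \
            (_pfn)->count_info |= PGC_allocated | 1;                            \
            if ( unlikely((_dom)->xenheap_pages++ == 0) )                       \
                get_knownalive_domain(_dom);                                    \
            /* Unlike the deleted DOMAIN2 variant, chain onto xenpage_list. */  \
            list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list);                \
            spin_unlock(&(_dom)->page_alloc_lock);                              \
        } while ( 0 )
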
     2.1 --- a/xen/arch/x86/domain_build.c	Thu Mar 24 16:48:36 2005 +0000
     2.2 +++ b/xen/arch/x86/domain_build.c	Thu Mar 24 22:01:27 2005 +0000
     2.3 @@ -426,7 +426,7 @@ int construct_dom0(struct domain *d,
     2.4          d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
     2.5      d->shared_info->n_vcpu = smp_num_cpus;
     2.6  
     2.7 -    /* setup monitor table */
     2.8 +    /* Set up monitor table */
     2.9      update_pagetables(ed);
    2.10  
    2.11      /* Install the new page tables. */
    2.12 @@ -472,13 +472,11 @@ int construct_dom0(struct domain *d,
    2.13      for ( pfn = 0; pfn < d->tot_pages; pfn++ )
    2.14      {
    2.15          mfn = pfn + (alloc_start>>PAGE_SHIFT);
    2.16 -#if 0
    2.17  #ifndef NDEBUG
    2.18  #define REVERSE_START ((v_end - dsi.v_start) >> PAGE_SHIFT)
    2.19 -        if ( pfn > REVERSE_START )
    2.20 +        if ( !opt_dom0_translate && (pfn > REVERSE_START) )
    2.21              mfn = (alloc_end>>PAGE_SHIFT) - (pfn - REVERSE_START);
    2.22  #endif
    2.23 -#endif
    2.24          ((u32 *)vphysmap_start)[pfn] = mfn;
    2.25          machine_to_phys_mapping[mfn] = pfn;
    2.26      }
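
With the surrounding "#if 0" removed, the reverse-order physmap trick is live again in debug builds, but now only when dom0 is not shadow-translated (the new opt_dom0_translate test). The usual point of this trick appears to be to make dom0's machine frames deliberately non-contiguous above REVERSE_START, so code that wrongly assumes a single linear pfn-to-mfn offset breaks early. A minimal illustration of the mapping rule; the constants are hypothetical stand-ins for the values computed in construct_dom0():

    /* Illustration only: alloc_start, alloc_end and reverse_start are made-up
     * example values, and opt_dom0_translate is a stand-in for the real
     * boot-time option. */
    static unsigned long alloc_start   = 0x08000000;  /* e.g. 128MB */
    static unsigned long alloc_end     = 0x10000000;  /* e.g. 256MB */
    static unsigned long reverse_start = 0x4000;      /* stands in for REVERSE_START */
    static int opt_dom0_translate      = 0;

    static unsigned long dom0_pfn_to_mfn(unsigned long pfn)
    {
        unsigned long mfn = pfn + (alloc_start >> PAGE_SHIFT);
    #ifndef NDEBUG
        /* Debug builds: above reverse_start, hand out frames from the top of
         * the allocation downwards, so the machine range is non-contiguous. */
        if ( !opt_dom0_translate && (pfn > reverse_start) )
            mfn = (alloc_end >> PAGE_SHIFT) - (pfn - reverse_start);
    #endif
        return mfn;
    }
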
     3.1 --- a/xen/arch/x86/mm.c	Thu Mar 24 16:48:36 2005 +0000
     3.2 +++ b/xen/arch/x86/mm.c	Thu Mar 24 22:01:27 2005 +0000
     3.3 @@ -1088,7 +1088,7 @@ void free_page_type(struct pfn_info *pag
     3.4  }
     3.5  
     3.6  
     3.7 -void _put_page_type(struct pfn_info *page)
     3.8 +void put_page_type(struct pfn_info *page)
     3.9  {
    3.10      u32 nx, x, y = page->u.inuse.type_info;
    3.11  
    3.12 @@ -1143,7 +1143,7 @@ void _put_page_type(struct pfn_info *pag
    3.13  }
    3.14  
    3.15  
    3.16 -int _get_page_type(struct pfn_info *page, u32 type)
    3.17 +int get_page_type(struct pfn_info *page, u32 type)
    3.18  {
    3.19      u32 nx, x, y = page->u.inuse.type_info;
    3.20  
    3.21 @@ -1880,7 +1880,7 @@ int do_mmu_update(
    3.22  
    3.23          case MMU_MACHPHYS_UPDATE:
    3.24  
    3.25 -            // HACK ALERT...  This about this later...
    3.26 +            // HACK ALERT...  Need to think about this some more...
    3.27              //
    3.28              if ( unlikely(shadow_mode_translate(FOREIGNDOM) && IS_PRIV(d)) )
    3.29              {
     4.1 --- a/xen/arch/x86/shadow.c	Thu Mar 24 16:48:36 2005 +0000
     4.2 +++ b/xen/arch/x86/shadow.c	Thu Mar 24 22:01:27 2005 +0000
     4.3 @@ -2,6 +2,7 @@
     4.4   * arch/x86/shadow.c
     4.5   * 
     4.6   * Copyright (c) 2005 Michael A Fetterman
     4.7 + * Based on an earlier implementation by Ian Pratt et al
     4.8   * 
     4.9   * This program is free software; you can redistribute it and/or modify
    4.10   * it under the terms of the GNU General Public License as published by
    4.11 @@ -658,11 +659,6 @@ set_p2m_entry(struct domain *d, unsigned
    4.12  
    4.13      ASSERT( phystab );
    4.14  
    4.15 -#ifdef WATCH_MAP_DOMAIN_CALLERS
    4.16 -    int old_map_domain_mem_noisy = map_domain_mem_noisy;
    4.17 -    map_domain_mem_noisy = 0;
    4.18 -#endif
    4.19 -
    4.20      l2 = map_domain_mem(phystab);
    4.21      if ( !l2_pgentry_val(l2e = l2[l2_table_offset(va)]) )
    4.22      {
    4.23 @@ -685,10 +681,6 @@ set_p2m_entry(struct domain *d, unsigned
    4.24                                              __PAGE_HYPERVISOR);
    4.25      unmap_domain_mem(l1);
    4.26  
    4.27 -#ifdef WATCH_MAP_DOMAIN_CALLERS
    4.28 -    map_domain_mem_noisy = old_map_domain_mem_noisy;
    4.29 -#endif
    4.30 -
    4.31      return 1;
    4.32  }
    4.33  
    4.34 @@ -722,6 +714,21 @@ alloc_p2m_table(struct domain *d)
    4.35          list_ent = page->list.next;
    4.36      }
    4.37  
    4.38 +    list_ent = d->xenpage_list.next;
    4.39 +    while ( list_ent != &d->xenpage_list )
    4.40 +    {
    4.41 +        page = list_entry(list_ent, struct pfn_info, list);
    4.42 +        mfn = page_to_pfn(page);
    4.43 +        pfn = machine_to_phys_mapping[mfn];
    4.44 +        if ( (pfn != INVALID_M2P_ENTRY) &&
    4.45 +             (pfn < (1u<<20)) )
    4.46 +        {
    4.47 +            set_p2m_entry(d, pfn, mfn);
    4.48 +        }
    4.49 +
    4.50 +        list_ent = page->list.next;
    4.51 +    }
    4.52 +
    4.53      return 1;
    4.54  }
    4.55  
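
The new hunk extends alloc_p2m_table() so that, after the existing walk over the domain's page_list (only its tail is visible in the context above), it also walks xenpage_list and enters every frame with a valid machine-to-phys mapping into the p2m. This is what picks up frames shared via SHARE_PFN_WITH_DOMAIN, such as shared_info from the domain.c hunk. A condensed sketch of the combined walk, assuming the page_list loop follows the same pattern as the xenpage_list loop actually shown in the diff:

    /* Sketch: populate the p2m from both of the domain's page lists.  Only
     * the xenpage_list walk appears in this changeset; the page_list loop
     * body is assumed to be identical in shape. */
    static void populate_p2m_sketch(struct domain *d)
    {
        struct list_head *lists[2] = { &d->page_list, &d->xenpage_list };
        struct list_head *ent;
        struct pfn_info  *page;
        unsigned long mfn, pfn;
        int i;

        for ( i = 0; i < 2; i++ )
        {
            for ( ent = lists[i]->next; ent != lists[i]; ent = ent->next )
            {
                page = list_entry(ent, struct pfn_info, list);
                mfn  = page_to_pfn(page);
                pfn  = machine_to_phys_mapping[mfn];
                /* Enter only frames that have a real pseudo-physical address
                 * and lie below 4GB (pfn < 1<<20 with 4kB pages). */
                if ( (pfn != INVALID_M2P_ENTRY) && (pfn < (1u << 20)) )
                    set_p2m_entry(d, pfn, mfn);
            }
        }
    }
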
     5.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Thu Mar 24 16:48:36 2005 +0000
     5.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Thu Mar 24 22:01:27 2005 +0000
     5.3 @@ -6,7 +6,6 @@
     5.4  
     5.5  #include <xen/config.h>
     5.6  #include <xen/sched.h>
     5.7 -#include <xen/config.h>
     5.8  
     5.9  #define DEFINE(_sym, _val) \
    5.10      __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
    5.11 @@ -76,9 +75,4 @@ void __dummy__(void)
    5.12      BLANK();
    5.13  
    5.14      DEFINE(FIXMAP_apic_base, fix_to_virt(FIX_APIC_BASE));
    5.15 -
    5.16 -#if PERF_COUNTERS
    5.17 -    OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
    5.18 -    OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
    5.19 -#endif
    5.20  }
     6.1 --- a/xen/arch/x86/x86_32/domain_page.c	Thu Mar 24 16:48:36 2005 +0000
     6.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Thu Mar 24 22:01:27 2005 +0000
     6.3 @@ -26,11 +26,6 @@ static spinlock_t map_lock = SPIN_LOCK_U
     6.4  /* Use a spare PTE bit to mark entries ready for recycling. */
     6.5  #define READY_FOR_TLB_FLUSH (1<<10)
     6.6  
     6.7 -#ifdef WATCH_MAP_DOMAIN_CALLERS
     6.8 -int map_domain_mem_noisy = 1;
     6.9 -#endif
    6.10 -
    6.11 -
    6.12  static void flush_all_ready_maps(void)
    6.13  {
    6.14      unsigned long *cache = mapcache;
    6.15 @@ -43,7 +38,8 @@ static void flush_all_ready_maps(void)
    6.16      while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
    6.17  }
    6.18  
    6.19 -void *_map_domain_mem(unsigned long pa)
    6.20 +
    6.21 +void *map_domain_mem(unsigned long pa)
    6.22  {
    6.23      unsigned long va;
    6.24      unsigned int idx, cpu = smp_processor_id();
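
With the WATCH_MAP_DOMAIN_CALLERS wrapper gone, map_domain_mem() is a plain function again rather than a macro routed through _map_domain_mem(); the calling convention is unchanged and can be seen in the set_p2m_entry() hunk above. A minimal usage sketch, where clearing the frame is just an example operation:

    /* Sketch: the usual map/modify/unmap pattern on x86_32.  'pa' is assumed
     * to be page-aligned here so the memset covers exactly one frame. */
    static void clear_machine_frame(unsigned long pa)
    {
        void *va = map_domain_mem(pa);   /* map the page containing 'pa' */

        memset(va, 0, PAGE_SIZE);        /* example work on the mapped frame */

        unmap_domain_mem(va);            /* 'va' must come from map_domain_mem() */
    }
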
     7.1 --- a/xen/common/page_alloc.c	Thu Mar 24 16:48:36 2005 +0000
     7.2 +++ b/xen/common/page_alloc.c	Thu Mar 24 22:01:27 2005 +0000
     7.3 @@ -350,11 +350,6 @@ void scrub_heap_pages(void)
     7.4  
     7.5      printk("Scrubbing Free RAM: ");
     7.6  
     7.7 -#ifdef WATCH_MAP_DOMAIN_CALLERS
     7.8 -    int old_map_domain_mem_noisy = map_domain_mem_noisy;
     7.9 -    map_domain_mem_noisy = 0;
    7.10 -#endif
    7.11 -
    7.12      for ( pfn = 0; pfn < (bitmap_size * 8); pfn++ )
    7.13      {
    7.14          /* Every 100MB, print a progress dot and appease the watchdog. */
    7.15 @@ -381,10 +376,6 @@ void scrub_heap_pages(void)
    7.16          spin_unlock_irqrestore(&heap_lock, flags);
    7.17      }
    7.18  
    7.19 -#ifdef WATCH_MAP_DOMAIN_CALLERS
    7.20 -    map_domain_mem_noisy = old_map_domain_mem_noisy;
    7.21 -#endif
    7.22 -
    7.23      printk("done.\n");
    7.24  }
    7.25  
    7.26 @@ -599,7 +590,7 @@ void free_domheap_pages(struct pfn_info 
    7.27                         pg[i].count_info, pg[i].u.inuse.type_info);
    7.28              }
    7.29  
    7.30 -            ASSERT( (pg[i].u.inuse.type_info & PGT_count_mask) == 0 );
    7.31 +            ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
    7.32              pg[i].tlbflush_timestamp  = tlbflush_current_time();
    7.33              pg[i].u.free.cpu_mask     = cpu_mask;
    7.34              list_del(&pg[i].list);
     8.1 --- a/xen/include/asm-x86/mm.h	Thu Mar 24 16:48:36 2005 +0000
     8.2 +++ b/xen/include/asm-x86/mm.h	Thu Mar 24 22:01:27 2005 +0000
     8.3 @@ -148,21 +148,6 @@ static inline u32 pickle_domptr(struct d
     8.4          list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list);                \
     8.5          spin_unlock(&(_dom)->page_alloc_lock);                              \
     8.6      } while ( 0 )
     8.7 -#define SHARE_PFN_WITH_DOMAIN2(_pfn, _dom)                                  \
     8.8 -    do {                                                                    \
     8.9 -        page_set_owner((_pfn), (_dom));                                     \
    8.10 -        /* The incremented type count is intended to pin to 'writable'. */  \
    8.11 -        (_pfn)->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;  \
    8.12 -        wmb(); /* install valid domain ptr before updating refcnt. */       \
    8.13 -        spin_lock(&(_dom)->page_alloc_lock);                                \
    8.14 -        /* _dom holds an allocation reference + writable ref */             \
    8.15 -        ASSERT((_pfn)->count_info == 0);                                    \
    8.16 -        (_pfn)->count_info |= PGC_allocated | 2;                            \
    8.17 -        if ( unlikely((_dom)->xenheap_pages++ == 0) )                       \
    8.18 -            get_knownalive_domain(_dom);                                    \
    8.19 -        list_add_tail(&(_pfn)->list, &(_dom)->page_list);                   \
    8.20 -        spin_unlock(&(_dom)->page_alloc_lock);                              \
    8.21 -    } while ( 0 )
    8.22  
    8.23  extern struct pfn_info *frame_table;
    8.24  extern unsigned long frame_table_size;
    8.25 @@ -225,36 +210,8 @@ static inline int get_page(struct pfn_in
    8.26      return 1;
    8.27  }
    8.28  
    8.29 -//#define MFN1_TO_WATCH 0x1d8
    8.30 -#ifdef MFN1_TO_WATCH
    8.31 -#define get_page_type(__p, __t) (                                             \
    8.32 -{                                                                             \
    8.33 -    struct pfn_info *_p = (__p);                                              \
    8.34 -    u32 _t = (__t);                                                           \
    8.35 -    if ( page_to_pfn(_p) == MFN1_TO_WATCH )                                   \
    8.36 -        printk("get_page_type(%x) c=%p ot=%p @ %s:%d in %s\n",                \
    8.37 -               MFN1_TO_WATCH, frame_table[MFN1_TO_WATCH].count_info,          \
    8.38 -               frame_table[MFN1_TO_WATCH].u.inuse.type_info,                  \
    8.39 -               __FILE__, __LINE__, __func__);                                 \
    8.40 -    _get_page_type(_p, _t);                                                   \
    8.41 -})
    8.42 -#define put_page_type(__p) (                                                  \
    8.43 -{                                                                             \
    8.44 -    struct pfn_info *_p = (__p);                                              \
    8.45 -    if ( page_to_pfn(_p) == MFN1_TO_WATCH )                                   \
    8.46 -        printk("put_page_type(%x) c=%p ot=%p @ %s:%d in %s\n",                \
    8.47 -               MFN1_TO_WATCH, frame_table[MFN1_TO_WATCH].count_info,          \
    8.48 -               frame_table[MFN1_TO_WATCH].u.inuse.type_info,                  \
    8.49 -               __FILE__, __LINE__, __func__);                                 \
    8.50 -    _put_page_type(_p);                                                       \
    8.51 -})
    8.52 -#else
    8.53 -#define _get_page_type get_page_type
    8.54 -#define _put_page_type put_page_type
    8.55 -#endif
    8.56 -
    8.57 -void _put_page_type(struct pfn_info *page);
    8.58 -int  _get_page_type(struct pfn_info *page, u32 type);
    8.59 +void put_page_type(struct pfn_info *page);
    8.60 +int  get_page_type(struct pfn_info *page, u32 type);
    8.61  int  get_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
    8.62  void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
    8.63  
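
Dropping the MFN1_TO_WATCH instrumentation also removes the macro indirection, so get_page_type() and put_page_type() are now called directly, matching the renames in mm.c above. The usual pattern is to take a type reference before treating a frame as a given type and to drop it afterwards; a hedged sketch, with failure handling reduced to a boolean result:

    /* Sketch: take and release a 'writable' type reference on a frame. */
    static int touch_frame_as_writable(struct pfn_info *page)
    {
        /* Fails if the frame already holds a conflicting type, e.g. it is
         * currently validated as a page table. */
        if ( unlikely(!get_page_type(page, PGT_writable_page)) )
            return 0;

        /* ... the frame may be treated as ordinary writable memory here ... */

        put_page_type(page);             /* drop the reference taken above */
        return 1;
    }
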
     9.1 --- a/xen/include/asm-x86/shadow.h	Thu Mar 24 16:48:36 2005 +0000
     9.2 +++ b/xen/include/asm-x86/shadow.h	Thu Mar 24 22:01:27 2005 +0000
     9.3 @@ -2,6 +2,7 @@
     9.4   * include/asm-x86/shadow.h
     9.5   * 
     9.6   * Copyright (c) 2005 Michael A Fetterman
     9.7 + * Based on an earlier implementation by Ian Pratt et al
     9.8   * 
     9.9   * This program is free software; you can redistribute it and/or modify
    9.10   * it under the terms of the GNU General Public License as published by
    10.1 --- a/xen/include/asm-x86/x86_32/domain_page.h	Thu Mar 24 16:48:36 2005 +0000
    10.2 +++ b/xen/include/asm-x86/x86_32/domain_page.h	Thu Mar 24 22:01:27 2005 +0000
    10.3 @@ -13,28 +13,12 @@
    10.4  extern unsigned long *mapcache;
    10.5  #define MAPCACHE_ENTRIES        1024
    10.6  
    10.7 -
    10.8 -//#define WATCH_MAP_DOMAIN_CALLERS 1
    10.9 -#ifdef WATCH_MAP_DOMAIN_CALLERS
   10.10 -extern int map_domain_mem_noisy;
   10.11 -#define map_domain_mem(__mdm_pa) (                                            \
   10.12 -{                                                                             \
   10.13 -    unsigned long _mdm_pa = (__mdm_pa);                                       \
   10.14 -    if ( map_domain_mem_noisy )                                               \
   10.15 -        printk("map_domain_mem(%p) @ %s:%d in %s\n",                          \
   10.16 -               _mdm_pa, __FILE__, __LINE__, __func__);                        \
   10.17 -    _map_domain_mem(_mdm_pa);                                                 \
   10.18 -})
   10.19 -#else
   10.20 -#define _map_domain_mem map_domain_mem
   10.21 -#endif
   10.22 -
   10.23  /*
   10.24   * Maps a given physical address, returning corresponding virtual address.
   10.25   * The entire page containing that VA is now accessible until a 
   10.26   * corresponding call to unmap_domain_mem().
   10.27   */
   10.28 -extern void *_map_domain_mem(unsigned long pa);
   10.29 +extern void *map_domain_mem(unsigned long pa);
   10.30  
   10.31  /*
   10.32   * Pass a VA within a page previously mapped with map_domain_mem().