ia64/xen-unstable

changeset 4464:ce2c7268f935

bitkeeper revision 1.1268 (4252405bviDCnobrL9rMLhSPvqKOKw)

Manual cleanup after merge

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author mafetter@fleming.research
date Tue Apr 05 07:38:03 2005 +0000 (2005-04-05)
parents 445b12a7221a
children 7fc6eac6da3a
files xen/arch/x86/audit.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h
line diff
     1.1 --- a/xen/arch/x86/audit.c	Mon Apr 04 16:13:17 2005 +0000
     1.2 +++ b/xen/arch/x86/audit.c	Tue Apr 05 07:38:03 2005 +0000
     1.3 @@ -683,7 +683,7 @@ void _audit_domain(struct domain *d, int
     1.4  
     1.5      if ( d != current->domain )
     1.6          domain_pause(d);
     1.7 -    synchronise_pagetables(~0UL);
     1.8 +    sync_lazy_execstate_all();
     1.9  
    1.10      // Maybe we should just be using BIGLOCK?
    1.11      //
     2.1 --- a/xen/arch/x86/mm.c	Mon Apr 04 16:13:17 2005 +0000
     2.2 +++ b/xen/arch/x86/mm.c	Tue Apr 05 07:38:03 2005 +0000
     2.3 @@ -199,8 +199,7 @@ void write_ptbase(struct exec_domain *ed
     2.4      write_cr3(pagetable_val(ed->arch.monitor_table));
     2.5  }
     2.6  
     2.7 -
     2.8 -static inline void invalidate_shadow_ldt(struct exec_domain *d)
     2.9 +void invalidate_shadow_ldt(struct exec_domain *d)
    2.10  {
    2.11      int i;
    2.12      unsigned long pfn;
    2.13 @@ -1306,6 +1305,7 @@ int new_guest_cr3(unsigned long mfn)
    2.14  static void process_deferred_ops(unsigned int cpu)
    2.15  {
    2.16      unsigned int deferred_ops;
    2.17 +    struct domain *d = current->domain;
    2.18  
    2.19      deferred_ops = percpu_info[cpu].deferred_ops;
    2.20      percpu_info[cpu].deferred_ops = 0;
    2.21 @@ -1462,6 +1462,9 @@ int do_mmuext_op(
    2.22              type = PGT_l1_page_table | PGT_va_mutable;
    2.23  
    2.24          pin_page:
    2.25 +            if ( shadow_mode_enabled(FOREIGNDOM) )
    2.26 +                type = PGT_writable_page;
    2.27 +
    2.28              okay = get_page_and_type_from_pagenr(op.mfn, type, FOREIGNDOM);
    2.29              if ( unlikely(!okay) )
    2.30              {
    2.31 @@ -1516,6 +1519,7 @@ int do_mmuext_op(
    2.32  
    2.33          case MMUEXT_NEW_BASEPTR:
    2.34              okay = new_guest_cr3(op.mfn);
    2.35 +            percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
    2.36              break;
    2.37          
    2.38  #ifdef __x86_64__
    2.39 @@ -1542,6 +1546,8 @@ int do_mmuext_op(
    2.40              break;
    2.41      
    2.42          case MMUEXT_INVLPG_LOCAL:
    2.43 +            if ( shadow_mode_enabled(d) )
    2.44 +                shadow_invlpg(ed, op.linear_addr);
    2.45              local_flush_tlb_one(op.linear_addr);
    2.46              break;
    2.47  
    2.48 @@ -1556,17 +1562,25 @@ int do_mmuext_op(
    2.49              }
    2.50              pset = vcpuset_to_pcpuset(d, vset);
    2.51              if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
    2.52 +            {
    2.53 +                BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
    2.54                  flush_tlb_mask(pset & d->cpuset);
    2.55 +            }
    2.56              else
    2.57 +            {
    2.58 +                BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
    2.59                  flush_tlb_one_mask(pset & d->cpuset, op.linear_addr);
    2.60 +            }
    2.61              break;
    2.62          }
    2.63  
    2.64          case MMUEXT_TLB_FLUSH_ALL:
    2.65 +            BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
    2.66              flush_tlb_mask(d->cpuset);
    2.67              break;
    2.68      
    2.69          case MMUEXT_INVLPG_ALL:
    2.70 +            BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
    2.71              flush_tlb_one_mask(d->cpuset, op.linear_addr);
    2.72              break;
    2.73  
    2.74 @@ -1584,6 +1598,15 @@ int do_mmuext_op(
    2.75  
    2.76          case MMUEXT_SET_LDT:
    2.77          {
    2.78 +            if ( shadow_mode_external(d) )
    2.79 +            {
    2.80 +                // ignore this request from an external domain...
    2.81 +                MEM_LOG("ignoring SET_LDT hypercall from external "
    2.82 +                        "domain %u\n", d->id);
    2.83 +                okay = 0;
    2.84 +                break;
    2.85 +            }
    2.86 +
    2.87              unsigned long ptr  = op.linear_addr;
    2.88              unsigned long ents = op.nr_ents;
    2.89              if ( ((ptr & (PAGE_SIZE-1)) != 0) || 
    2.90 @@ -1732,7 +1755,7 @@ int do_mmu_update(
    2.91      unsigned int foreigndom)
    2.92  {
    2.93      mmu_update_t req;
    2.94 -    unsigned long va = 0, pfn, prev_pfn = 0;
    2.95 +    unsigned long va = 0, mfn, prev_mfn = 0, gpfn;
    2.96      struct pfn_info *page;
    2.97      int rc = 0, okay = 1, i = 0, cpu = smp_processor_id();
    2.98      unsigned int cmd, done = 0;
    2.99 @@ -1747,9 +1770,6 @@ int do_mmu_update(
   2.100      if ( unlikely(shadow_mode_enabled(d)) )
   2.101          check_pagetable(ed, "pre-mmu"); /* debug */
   2.102  
   2.103 -    if ( unlikely(shadow_mode_translate(d)) )
   2.104 -        domain_crash_synchronous();
   2.105 -
   2.106      if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
   2.107      {
   2.108          count &= ~MMU_UPDATE_PREEMPTED;
   2.109 @@ -1875,7 +1895,8 @@ int do_mmu_update(
   2.110                              __mark_dirty(d, mfn);
   2.111  
   2.112                          gpfn = __mfn_to_gpfn(d, mfn);
   2.113 -                        ASSERT(gpfn);
   2.114 +                        ASSERT(VALID_M2P(gpfn));
   2.115 +
   2.116                          if ( page_is_page_table(page) )
   2.117                              shadow_mark_mfn_out_of_sync(ed, gpfn, mfn);
   2.118                      }
   2.119 @@ -2012,7 +2033,10 @@ int update_shadow_va_mapping(unsigned lo
   2.120  
   2.121      if ( unlikely(__put_user(val, &l1_pgentry_val(
   2.122                                   linear_pg_table[l1_linear_offset(va)]))) )
   2.123 -        return -EINVAL;
   2.124 +    {
   2.125 +        rc = -EINVAL;
   2.126 +        goto out;
   2.127 +    }
   2.128  
   2.129      // also need to update the shadow
   2.130  
   2.131 @@ -2027,6 +2051,7 @@ int update_shadow_va_mapping(unsigned lo
   2.132      if ( shadow_mode_log_dirty(d) )
   2.133          mark_dirty(d, va_to_l1mfn(ed, va));
   2.134  
   2.135 + out:
   2.136      shadow_unlock(d);
   2.137      check_pagetable(ed, "post-va"); /* debug */
   2.138  
   2.139 @@ -2658,8 +2683,8 @@ int ptwr_do_page_fault(unsigned long add
   2.140      u32                 l2_idx;
   2.141      struct exec_domain *ed = current;
   2.142  
   2.143 -    // not supported in combination with various shadow modes!
   2.144 -    ASSERT( !shadow_mode_enabled(ed->domain) );
   2.145 +    if ( unlikely(shadow_mode_enabled(ed->domain)) )
   2.146 +        return 0;
   2.147  
   2.148      /*
   2.149       * Attempt to read the PTE that maps the VA being accessed. By checking for
     3.1 --- a/xen/arch/x86/shadow.c	Mon Apr 04 16:13:17 2005 +0000
     3.2 +++ b/xen/arch/x86/shadow.c	Tue Apr 05 07:38:03 2005 +0000
     3.3 @@ -590,10 +590,10 @@ static void alloc_monitor_pagetable(stru
     3.4      struct pfn_info *mmfn_info;
     3.5      struct domain *d = ed->domain;
     3.6  
     3.7 -    ASSERT(!pagetable_val(ed->arch.monitor_table)); /* we should only get called once */
     3.8 +    ASSERT(pagetable_val(ed->arch.monitor_table) == 0);
     3.9  
    3.10      mmfn_info = alloc_domheap_page(NULL);
    3.11 -    ASSERT( mmfn_info ); 
    3.12 +    ASSERT(mmfn_info != NULL);
    3.13  
    3.14      mmfn = (unsigned long) (mmfn_info - frame_table);
    3.15      mpl2e = (l2_pgentry_t *) map_domain_mem(mmfn << PAGE_SHIFT);
    3.16 @@ -2756,7 +2756,7 @@ int _check_pagetable(struct exec_domain 
    3.17      shadow_lock(d);
    3.18  
    3.19      sh_check_name = s;
    3.20 -    SH_VVLOG("%s-PT Audit", s);
    3.21 +    //SH_VVLOG("%s-PT Audit", s);
    3.22      sh_l2_present = sh_l1_present = 0;
    3.23      perfc_incrc(check_pagetable);
    3.24  
    3.25 @@ -2802,8 +2802,10 @@ int _check_pagetable(struct exec_domain 
    3.26      unmap_domain_mem(spl2e);
    3.27      unmap_domain_mem(gpl2e);
    3.28  
    3.29 +#if 0
    3.30      SH_VVLOG("PT verified : l2_present = %d, l1_present = %d",
    3.31               sh_l2_present, sh_l1_present);
    3.32 +#endif
    3.33  
    3.34   out:
    3.35      if ( errors )
     4.1 --- a/xen/include/asm-x86/mm.h	Mon Apr 04 16:13:17 2005 +0000
     4.2 +++ b/xen/include/asm-x86/mm.h	Tue Apr 05 07:38:03 2005 +0000
     4.3 @@ -246,7 +246,8 @@ int check_descriptor(struct desc_struct 
     4.4  #undef  machine_to_phys_mapping
     4.5  #define machine_to_phys_mapping ((u32 *)RDWR_MPT_VIRT_START)
     4.6  #define INVALID_M2P_ENTRY        (~0U)
     4.7 -#define IS_INVALID_M2P_ENTRY(_e) (!!((_e) & (1U<<31)))
     4.8 +#define VALID_M2P(_e)            (!((_e) & (1U<<31)))
     4.9 +#define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))
    4.10  
    4.11  /*
    4.12   * The phys_to_machine_mapping is the reversed mapping of MPT for full
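For reference, the mm.h hunk above replaces the single IS_INVALID_M2P_ENTRY test with a positive VALID_M2P form, which the mm.c hunk then uses (ASSERT(gpfn) becomes ASSERT(VALID_M2P(gpfn))). Below is a minimal standalone sketch of how the two macros relate; the constants and macros are copied from the hunk, while the main() harness and sample values are illustrative only and not part of the tree:

    #include <assert.h>
    #include <stdio.h>

    /* Constants/macros as defined in xen/include/asm-x86/mm.h after this change. */
    #define INVALID_M2P_ENTRY        (~0U)
    #define VALID_M2P(_e)            (!((_e) & (1U<<31)))
    #define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))

    int main(void)
    {
        unsigned int gpfn_ok  = 0x1234;            /* top bit clear => valid M2P entry */
        unsigned int gpfn_bad = INVALID_M2P_ENTRY; /* top bit set   => invalid entry   */

        assert(VALID_M2P(gpfn_ok));
        assert(IS_INVALID_M2P_ENTRY(gpfn_bad));

        /* Mirrors the mm.c change: ASSERT(gpfn) becomes ASSERT(VALID_M2P(gpfn)). */
        printf("gpfn_ok valid=%d, gpfn_bad valid=%d\n",
               VALID_M2P(gpfn_ok), VALID_M2P(gpfn_bad));
        return 0;
    }
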
     5.1 --- a/xen/include/asm-x86/shadow.h	Mon Apr 04 16:13:17 2005 +0000
     5.2 +++ b/xen/include/asm-x86/shadow.h	Tue Apr 05 07:38:03 2005 +0000
     5.3 @@ -222,11 +222,11 @@ struct out_of_sync_entry {
     5.4  #define SHADOW_SNAPSHOT_ELSEWHERE (-1L)
     5.5  
     5.6  /************************************************************************/
     5.7 -#define SHADOW_DEBUG 0
     5.8 -#define SHADOW_VERBOSE_DEBUG 0
     5.9 -#define SHADOW_VVERBOSE_DEBUG 0
    5.10 -#define SHADOW_HASH_DEBUG 0
    5.11 -#define FULLSHADOW_DEBUG 0
    5.12 +#define SHADOW_DEBUG 1
    5.13 +#define SHADOW_VERBOSE_DEBUG 1
    5.14 +#define SHADOW_VVERBOSE_DEBUG 1
    5.15 +#define SHADOW_HASH_DEBUG 1
    5.16 +#define FULLSHADOW_DEBUG 1
    5.17  
    5.18  #if SHADOW_DEBUG
    5.19  extern int shadow_status_noswap;
    5.20 @@ -373,7 +373,7 @@ update_hl2e(struct exec_domain *ed, unsi
    5.21      if ( need_flush )
    5.22      {
    5.23          perfc_incrc(update_hl2e_invlpg);
    5.24 -        __flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
    5.25 +        local_flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
    5.26      }
    5.27  }
    5.28  
    5.29 @@ -959,7 +959,7 @@ static inline unsigned long ___shadow_st
    5.30                  perfc_incrc(shadow_status_hit_head);
    5.31              }
    5.32  
    5.33 -            SH_VVLOG("lookup gpfn=%p => status=%p", key, head->smfn);
    5.34 +            //SH_VVLOG("lookup gpfn=%p => status=%p", key, head->smfn);
    5.35              return head->smfn;
    5.36          }
    5.37  
    5.38 @@ -968,7 +968,7 @@ static inline unsigned long ___shadow_st
    5.39      }
    5.40      while ( x != NULL );
    5.41  
    5.42 -    SH_VVLOG("lookup gpfn=%p => status=0", key);
    5.43 +    //SH_VVLOG("lookup gpfn=%p => status=0", key);
    5.44      perfc_incrc(shadow_status_miss);
    5.45      return 0;
    5.46  }