direct-io.hg

changeset 4659:97d3b54f6d13

bitkeeper revision 1.1385 (426e0a64bO3xeXCBe6s8h2DX9QnqbQ)

Simple batched writable p.t. support for multi-processor guests.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Apr 26 09:31:16 2005 +0000 (2005-04-26)
parents d528142a1daf
children fe711d5cee02 0474fdc198e2
files xen/arch/x86/mm.c xen/include/asm-x86/mm.h
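
In outline: the writable-pagetable fast path previously fell back to full emulation whenever the guest had more than one VCPU (the removed ed_next_list check below). With this change the fast path records which exec domain created the writable mapping (the new 'ed' field added to struct ptwr_info), flushes the stale writable mapping on every physical CPU in the domain's cpuset rather than only the local TLB, and, when the flush is triggered on a different VCPU, temporarily switches to the owner's page-table base so that linear_pg_table resolves in the correct address space. The sketch below is illustrative only and not part of the patch; ptwr_flush_sketch is a hypothetical name, and 'owner' stands in for d->arch.ptwr[which].ed as recorded at fault time.

    /* Illustrative sketch of the cross-VCPU flush pattern (not patch code). */
    static void ptwr_flush_sketch(struct domain *d, struct exec_domain *owner,
                                  unsigned long l1va)
    {
        /* linear_pg_table is only valid in the owner's address space, so
         * temporarily adopt the owner's page-table base if we differ. */
        if ( unlikely(owner != current) )
            write_ptbase(owner);

        /* ... revert the writable PTE and propagate the batched updates ... */

        /* The stale writable mapping may be cached on any physical CPU the
         * guest's VCPUs run on, so flush across the whole domain cpuset. */
        flush_tlb_one_mask(d->cpuset, l1va);

        /* Restore this VCPU's own page tables before returning. */
        if ( unlikely(owner != current) )
            write_ptbase(current);
    }
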
line diff
     1.1 --- a/xen/arch/x86/mm.c	Mon Apr 25 22:21:26 2005 +0000
     1.2 +++ b/xen/arch/x86/mm.c	Tue Apr 26 09:31:16 2005 +0000
     1.3 @@ -2477,9 +2477,11 @@ void ptwr_flush(struct domain *d, const 
     1.4      int            i;
     1.5      unsigned int   modified = 0;
     1.6  
     1.7 -    // not supported in combination with various shadow modes!
     1.8 -    ASSERT( !shadow_mode_enabled(d) );
     1.9 -    
    1.10 +    ASSERT(!shadow_mode_enabled(d));
    1.11 +
    1.12 +    if ( unlikely(d->arch.ptwr[which].ed != current) )
    1.13 +        write_ptbase(d->arch.ptwr[which].ed);
    1.14 +
    1.15      l1va = d->arch.ptwr[which].l1va;
    1.16      ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
    1.17  
    1.18 @@ -2513,7 +2515,7 @@ void ptwr_flush(struct domain *d, const 
    1.19  
    1.20      /* Ensure that there are no stale writable mappings in any TLB. */
    1.21      /* NB. INVLPG is a serialising instruction: flushes pending updates. */
    1.22 -    local_flush_tlb_one(l1va); /* XXX Multi-CPU guests? */
    1.23 +    flush_tlb_one_mask(d->cpuset, l1va);
    1.24      PTWR_PRINTK("[%c] disconnected_l1va at %p now %lx\n",
    1.25                  PTWR_PRINT_WHICH, ptep, pte);
    1.26  
    1.27 @@ -2579,6 +2581,9 @@ void ptwr_flush(struct domain *d, const 
    1.28       */
    1.29  
    1.30      d->arch.ptwr[which].l1va = 0;
    1.31 +
    1.32 +    if ( unlikely(d->arch.ptwr[which].ed != current) )
    1.33 +        write_ptbase(current);
    1.34  }
    1.35  
    1.36  static int ptwr_emulated_update(
    1.37 @@ -2741,7 +2746,7 @@ int ptwr_do_page_fault(struct domain *d,
    1.38      page = &frame_table[pfn];
    1.39  
    1.40      /* We are looking only for read-only mappings of p.t. pages. */
    1.41 -    if ( ((l1e_get_flags(pte) & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
    1.42 +    if ( ((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) != _PAGE_PRESENT) ||
    1.43           ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
    1.44           (page_get_owner(page) != d) )
    1.45      {
    1.46 @@ -2753,10 +2758,6 @@ int ptwr_do_page_fault(struct domain *d,
    1.47      goto emulate;
    1.48  #endif
    1.49  
    1.50 -    /* Writable pagetables are not yet SMP safe. Use emulator for now. */
    1.51 -    if ( d->exec_domain[0]->ed_next_list != NULL )
    1.52 -        goto emulate;
    1.53 -
    1.54      /* Get the L2 index at which this L1 p.t. is always mapped. */
    1.55      l2_idx = page->u.inuse.type_info & PGT_va_mask;
    1.56      if ( unlikely(l2_idx >= PGT_va_unknown) )
    1.57 @@ -2785,7 +2786,21 @@ int ptwr_do_page_fault(struct domain *d,
    1.58                (l2_idx == d->arch.ptwr[PTWR_PT_ACTIVE].l2_idx)) )
    1.59              which = PTWR_PT_ACTIVE;
    1.60      }
    1.61 -    
    1.62 +
    1.63 +    /*
    1.64 +     * If this is a multi-processor guest then ensure that the page is hooked
    1.65 +     * into at most one L2 table, which must be the one running on this VCPU.
    1.66 +     */
    1.67 +    if ( (d->exec_domain[0]->ed_next_list != NULL) &&
    1.68 +         ((page->u.inuse.type_info & PGT_count_mask) != 
    1.69 +          (!!(page->u.inuse.type_info & PGT_pinned) +
    1.70 +           (which == PTWR_PT_ACTIVE))) )
    1.71 +    {
    1.72 +        /* Could be conflicting writable mappings from other VCPUs. */
    1.73 +        cleanup_writable_pagetable(d);
    1.74 +        goto emulate;
    1.75 +    }
    1.76 +
    1.77      PTWR_PRINTK("[%c] page_fault on l1 pt at va %lx, pt for %08x, "
    1.78                  "pfn %lx\n", PTWR_PRINT_WHICH,
    1.79                  addr, l2_idx << L2_PAGETABLE_SHIFT, pfn);
    1.80 @@ -2810,12 +2825,13 @@ int ptwr_do_page_fault(struct domain *d,
    1.81  
    1.82      d->arch.ptwr[which].l1va   = addr | 1;
    1.83      d->arch.ptwr[which].l2_idx = l2_idx;
    1.84 +    d->arch.ptwr[which].ed     = current;
    1.85      
    1.86      /* For safety, disconnect the L1 p.t. page from current space. */
    1.87      if ( which == PTWR_PT_ACTIVE )
    1.88      {
    1.89          l2e_remove_flags(pl2e, _PAGE_PRESENT);
    1.90 -        local_flush_tlb(); /* XXX Multi-CPU guests? */
    1.91 +        flush_tlb_mask(d->cpuset);
    1.92      }
    1.93      
    1.94      /* Temporarily map the L1 page, and make a copy of it. */
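
The multi-processor check added above compares the L1 page's type reference count (PGT_count_mask) against the references this VCPU can account for: one if the page table is pinned, plus one if it is the "active" L1 currently hooked into this VCPU's L2. Any extra reference means some other L2, possibly on another VCPU, also maps the page, so the fast path is abandoned in favour of emulation after cleanup_writable_pagetable(). Written out long-hand as an illustrative sketch (l1_only_hooked_here is a hypothetical name; type_info is taken from page->u.inuse.type_info):

    /* Illustrative sketch of the "hooked into at most one L2" test above. */
    static int l1_only_hooked_here(unsigned long type_info, int which)
    {
        unsigned long actual   = type_info & PGT_count_mask;
        unsigned long expected = 0;

        if ( type_info & PGT_pinned )
            expected += 1;          /* pinning holds one type reference    */
        if ( which == PTWR_PT_ACTIVE )
            expected += 1;          /* hooked into this VCPU's active L2   */

        /* Any extra reference implies another L2, possibly on another VCPU,
         * also references this L1, so the writable fast path is unsafe. */
        return actual == expected;
    }

For a single-VCPU guest (ed_next_list == NULL) the check is skipped, so behaviour matches the previous uniprocessor fast path.
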
     2.1 --- a/xen/include/asm-x86/mm.h	Mon Apr 25 22:21:26 2005 +0000
     2.2 +++ b/xen/include/asm-x86/mm.h	Tue Apr 26 09:31:16 2005 +0000
     2.3 @@ -301,6 +301,8 @@ struct ptwr_info {
     2.4      unsigned int l2_idx; /* NB. Only used for PTWR_PT_ACTIVE. */
     2.5      /* Info about last ptwr update batch. */
     2.6      unsigned int prev_nr_updates;
     2.7 +    /* Exec domain which created writable mapping. */
     2.8 +    struct exec_domain *ed;
     2.9  };
    2.10  
    2.11  #define PTWR_PT_ACTIVE 0