ia64/xen-unstable

changeset 4016:66a6d4a1d88b

bitkeeper revision 1.1236.9.3 (422dcf06pBEc-gi-qwPDx52pb_x1Ig)

Added updates to pagetables in grant table map and unmap operations.
author cwc22@centipede.cl.cam.ac.uk
date Tue Mar 08 16:12:54 2005 +0000 (2005-03-08)
parents d400d2b7feaf
children b4a40ce41618
files xen/arch/x86/mm.c xen/common/grant_table.c xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h xen/include/public/grant_table.h
line diff
     1.1 --- a/xen/arch/x86/mm.c	Fri Mar 04 11:04:09 2005 +0000
     1.2 +++ b/xen/arch/x86/mm.c	Tue Mar 08 16:12:54 2005 +0000
     1.3 @@ -1910,16 +1910,124 @@ int do_mmu_update(
     1.4      return rc;
     1.5  }
     1.6  
     1.7 +void update_shadow_va_mapping(unsigned long va,
     1.8 +                              unsigned long val,
     1.9 +                              struct exec_domain *ed,
    1.10 +                              struct domain *d)
    1.11 +{
    1.12 +    /* This function assumes the caller is holding the domain's BIGLOCK
    1.13 +     * and is running in a shadow mode
    1.14 +     */
    1.15 +
    1.16 +    unsigned long   sval = 0;
    1.17 +
    1.18 +    l1pte_propagate_from_guest(d, &val, &sval);
    1.19 +
    1.20 +    if ( unlikely(__put_user(sval, ((unsigned long *)(
    1.21 +        &shadow_linear_pg_table[l1_linear_offset(va)])))) )
    1.22 +    {
    1.23 +        /*
     1.24 +         * Since L2's are guaranteed RW, failure indicates either that the
    1.25 +         * page was not shadowed, or that the L2 entry has not yet been
    1.26 +         * updated to reflect the shadow.
    1.27 +         */
    1.28 +        l2_pgentry_t gpde = linear_l2_table[l2_table_offset(va)];
    1.29 +        unsigned long gpfn = l2_pgentry_val(gpde) >> PAGE_SHIFT;
    1.30 +
    1.31 +        if (get_shadow_status(d, gpfn))
    1.32 +        {
    1.33 +            unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
    1.34 +            unsigned long *gl1e = map_domain_mem(gmfn << PAGE_SHIFT);
    1.35 +            unsigned l1_idx = l1_table_offset(va);
    1.36 +            gl1e[l1_idx] = sval;
    1.37 +            unmap_domain_mem(gl1e);
    1.38 +            put_shadow_status(d);
    1.39 +
    1.40 +            perfc_incrc(shadow_update_va_fail1);
    1.41 +        }
    1.42 +        else
    1.43 +            perfc_incrc(shadow_update_va_fail2);
    1.44 +    }
    1.45 +
    1.46 +    /*
    1.47 +     * If we're in log-dirty mode then we need to note that we've updated
    1.48 +     * the PTE in the PT-holding page. We need the machine frame number
    1.49 +     * for this.
    1.50 +     */
    1.51 +    if ( shadow_mode_log_dirty(d) )
    1.52 +        mark_dirty(d, va_to_l1mfn(va));
    1.53 +
    1.54 +    check_pagetable(d, ed->arch.guest_table, "va"); /* debug */
    1.55 +}
    1.56 +
    1.57 +int update_grant_va_mapping(unsigned long va,
    1.58 +                            unsigned long _nl1e, 
    1.59 +                            struct domain *d,
    1.60 +                            struct exec_domain *ed)
    1.61 +{
    1.62 +    /* Caller must:
    1.63 +     * . own d's BIGLOCK 
    1.64 +     * . already have 'get_page' correctly on the to-be-installed nl1e
    1.65 +     * . be responsible for flushing the TLB
    1.66 +     * . check PTE being installed isn't DISALLOWED
    1.67 +     */
    1.68 +
    1.69 +    /* Return value:
    1.70 +     * -ve : error
    1.71 +     * 0   : done
    1.72 +     * GNTUPDVA_prev_ro : done & prior mapping was ro to same frame
    1.73 +     * GNTUPDVA_prev_rw : done & prior mapping was rw to same frame
    1.74 +     */
    1.75 +
    1.76 +    int             rc = 0;
    1.77 +    l1_pgentry_t   *pl1e;
    1.78 +    unsigned long   _ol1e;
    1.79 +
    1.80 +    cleanup_writable_pagetable(d);
    1.81 +
    1.82 +    pl1e = &linear_pg_table[l1_linear_offset(va)];
    1.83 +
    1.84 +    if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
    1.85 +        rc = -EINVAL;
    1.86 +    else
    1.87 +    {
    1.88 +        l1_pgentry_t ol1e = mk_l1_pgentry(_ol1e);
    1.89 +
    1.90 +        if ( update_l1e(pl1e, ol1e, mk_l1_pgentry(_nl1e)) )
    1.91 +        {
    1.92 +            /* overwrote different mfn?  */
    1.93 +            if (((_ol1e ^ _nl1e) & (PADDR_MASK & PAGE_MASK)) != 0)
    1.94 +            {
    1.95 +                rc = 0;
    1.96 +                put_page_from_l1e(ol1e, d);
    1.97 +            }
    1.98 +            else
    1.99 +                rc = ((_ol1e & _PAGE_RW) ? GNTUPDVA_prev_rw
   1.100 +                                         : GNTUPDVA_prev_ro );
   1.101 +                /* use return code to avoid nasty grant table
   1.102 +                 * slow path in put_page_from_l1e -- caller
   1.103 +                 * must handle ref count instead. */
   1.104 +        }
   1.105 +        else
   1.106 +            rc = -EINVAL;
   1.107 +    }
   1.108 +
   1.109 +    if ( unlikely(shadow_mode_enabled(d)) )
   1.110 +        update_shadow_va_mapping(va, _nl1e, ed, d);
   1.111 +
   1.112 +    return rc;
   1.113 +}
   1.114 +
   1.115  
   1.116  int do_update_va_mapping(unsigned long va,
   1.117                           unsigned long val, 
   1.118                           unsigned long flags)
   1.119  {
   1.120 -    struct exec_domain *ed = current;
   1.121 -    struct domain *d = ed->domain;
   1.122 -    int err = 0;
   1.123 -    unsigned int cpu = ed->processor;
   1.124 -    unsigned long deferred_ops;
   1.125 +    struct exec_domain      *ed  = current;
   1.126 +    struct domain           *d   = ed->domain;
   1.127 +    unsigned int             cpu = ed->processor;
   1.128 +    unsigned long            deferred_ops;
   1.129 +    int                      rc = 0;
   1.130  
   1.131      perfc_incrc(calls_to_update_va);
   1.132  
   1.133 @@ -1940,50 +2048,10 @@ int do_update_va_mapping(unsigned long v
   1.134  
   1.135      if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
   1.136                                  mk_l1_pgentry(val))) )
   1.137 -        err = -EINVAL;
   1.138 +        rc = -EINVAL;
   1.139  
   1.140      if ( unlikely(shadow_mode_enabled(d)) )
   1.141 -    {
   1.142 -        unsigned long sval = 0;
   1.143 -
   1.144 -        l1pte_propagate_from_guest(d, &val, &sval);
   1.145 -
   1.146 -        if ( unlikely(__put_user(sval, ((unsigned long *)(
   1.147 -            &shadow_linear_pg_table[l1_linear_offset(va)])))) )
   1.148 -        {
   1.149 -            /*
   1.150 -             * Since L2's are guranteed RW, failure indicates either that the
   1.151 -             * page was not shadowed, or that the L2 entry has not yet been
   1.152 -             * updated to reflect the shadow.
   1.153 -             */
   1.154 -            l2_pgentry_t gpde = linear_l2_table[l2_table_offset(va)];
   1.155 -            unsigned long gpfn = l2_pgentry_val(gpde) >> PAGE_SHIFT;
   1.156 -
   1.157 -            if (get_shadow_status(d, gpfn))
   1.158 -            {
   1.159 -                unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
   1.160 -                unsigned long *gl1e = map_domain_mem(gmfn << PAGE_SHIFT);
   1.161 -                unsigned l1_idx = l1_table_offset(va);
   1.162 -                gl1e[l1_idx] = sval;
   1.163 -                unmap_domain_mem(gl1e);
   1.164 -                put_shadow_status(d);
   1.165 -
   1.166 -                perfc_incrc(shadow_update_va_fail1);
   1.167 -            }
   1.168 -            else
   1.169 -                perfc_incrc(shadow_update_va_fail2);
   1.170 -        }
   1.171 -
   1.172 -        /*
   1.173 -         * If we're in log-dirty mode then we need to note that we've updated
   1.174 -         * the PTE in the PT-holding page. We need the machine frame number
   1.175 -         * for this.
   1.176 -         */
   1.177 -        if ( shadow_mode_log_dirty(d) )
   1.178 -            mark_dirty(d, va_to_l1mfn(va));
   1.179 -  
   1.180 -        check_pagetable(d, ed->arch.guest_table, "va"); /* debug */
   1.181 -    }
   1.182 +        update_shadow_va_mapping(va, val, ed, d);
   1.183  
   1.184      deferred_ops = percpu_info[cpu].deferred_ops;
   1.185      percpu_info[cpu].deferred_ops = 0;
   1.186 @@ -1999,7 +2067,7 @@ int do_update_va_mapping(unsigned long v
   1.187      
   1.188      UNLOCK_BIGLOCK(d);
   1.189  
   1.190 -    return err;
   1.191 +    return rc;
   1.192  }
   1.193  
   1.194  int do_update_va_mapping_otherdomain(unsigned long va,
     2.1 --- a/xen/common/grant_table.c	Fri Mar 04 11:04:09 2005 +0000
     2.2 +++ b/xen/common/grant_table.c	Tue Mar 08 16:12:54 2005 +0000
     2.3 @@ -4,6 +4,7 @@
     2.4   * Mechanism for granting foreign access to page frames, and receiving
     2.5   * page-ownership transfers.
     2.6   * 
     2.7 + * Copyright (c) 2005 Christopher Clark
     2.8   * Copyright (c) 2004 K A Fraser
     2.9   * 
    2.10   * This program is free software; you can redistribute it and/or modify
    2.11 @@ -23,6 +24,8 @@
    2.12  
    2.13  #include <xen/config.h>
    2.14  #include <xen/sched.h>
    2.15 +#include <asm-x86/mm.h>
    2.16 +#include <asm-x86/shadow.h>
    2.17  
    2.18  #define PIN_FAIL(_rc, _f, _a...)   \
    2.19      do {                           \
    2.20 @@ -50,19 +53,24 @@ put_maptrack_handle(
    2.21      t->maptrack_head = handle;
    2.22  }
    2.23  
    2.24 -static void
    2.25 +static int
    2.26  __gnttab_map_grant_ref(
    2.27 -    gnttab_map_grant_ref_t *uop)
    2.28 +    gnttab_map_grant_ref_t *uop,
    2.29 +    unsigned long *va)
    2.30  {
    2.31 -    domid_t        dom, sdom;
    2.32 -    grant_ref_t    ref;
    2.33 -    struct domain *ld, *rd;
    2.34 -    u16            flags, sflags;
    2.35 -    int            handle;
    2.36 +    domid_t               dom, sdom;
    2.37 +    grant_ref_t           ref;
    2.38 +    struct domain        *ld, *rd;
    2.39 +    struct exec_domain   *led;
    2.40 +    u16                   flags, sflags;
    2.41 +    int                   handle;
    2.42      active_grant_entry_t *act;
    2.43 -    grant_entry_t *sha;
    2.44 -    s16            rc = 0;
    2.45 -    unsigned long  frame;
    2.46 +    grant_entry_t        *sha;
    2.47 +    s16                   rc = 0;
    2.48 +    unsigned long         frame = 0, host_virt_addr;
    2.49 +
    2.50 +    /* Returns 0 if TLB flush / invalidate required by caller.
    2.51 +     * va will indicate the address to be invalidated. */
    2.52  
    2.53      /*
    2.54       * We bound the number of times we retry CMPXCHG on memory locations that
    2.55 @@ -74,23 +82,33 @@ static void
    2.56       */
    2.57      int            retries = 0;
    2.58  
    2.59 -    ld = current->domain;
    2.60 +    led = current;
    2.61 +    ld = led->domain;
    2.62  
    2.63      /* Bitwise-OR avoids short-circuiting which screws control flow. */
    2.64      if ( unlikely(__get_user(dom, &uop->dom) |
    2.65                    __get_user(ref, &uop->ref) |
    2.66 +                  __get_user(host_virt_addr, &uop->host_virt_addr) |
    2.67                    __get_user(flags, &uop->flags)) )
    2.68      {
    2.69          DPRINTK("Fault while reading gnttab_map_grant_ref_t.\n");
    2.70 -        return; /* don't set status */
    2.71 +        return -EFAULT; /* don't set status */
    2.72      }
    2.73  
    2.74 -    if ( unlikely(ref >= NR_GRANT_ENTRIES) || 
    2.75 +    if ( ((host_virt_addr != 0) || (flags & GNTMAP_host_map) ) &&
    2.76 +         unlikely(!__addr_ok(host_virt_addr)))
    2.77 +    {
    2.78 +        DPRINTK("Bad virtual address (%x) or flags (%x).\n", host_virt_addr, flags);
    2.79 +        (void)__put_user(GNTST_bad_virt_addr, &uop->handle);
    2.80 +        return GNTST_bad_gntref;
    2.81 +    }
    2.82 +
    2.83 +    if ( unlikely(ref >= NR_GRANT_ENTRIES) ||
    2.84           unlikely((flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
    2.85      {
    2.86          DPRINTK("Bad ref (%d) or flags (%x).\n", ref, flags);
    2.87          (void)__put_user(GNTST_bad_gntref, &uop->handle);
    2.88 -        return;
    2.89 +        return GNTST_bad_gntref;
    2.90      }
    2.91  
    2.92      if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
    2.93 @@ -100,7 +118,7 @@ static void
    2.94              put_domain(rd);
    2.95          DPRINTK("Could not find domain %d\n", dom);
    2.96          (void)__put_user(GNTST_bad_domain, &uop->handle);
    2.97 -        return;
    2.98 +        return GNTST_bad_domain;
    2.99      }
   2.100  
   2.101      if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
   2.102 @@ -108,7 +126,7 @@ static void
   2.103          put_domain(rd);
   2.104          DPRINTK("No more map handles available\n");
   2.105          (void)__put_user(GNTST_no_device_space, &uop->handle);
   2.106 -        return;
   2.107 +        return GNTST_no_device_space;
   2.108      }
   2.109      DPRINTK("Mapping grant ref (%hu) for domain (%hu) with flags (%x)\n",
   2.110              ref, dom, flags);
   2.111 @@ -117,7 +135,7 @@ static void
   2.112      sha = &rd->grant_table->shared[ref];
   2.113  
   2.114      spin_lock(&rd->grant_table->lock);
   2.115 -    
   2.116 +
   2.117      if ( act->pin == 0 )
   2.118      {
   2.119          /* CASE 1: Activating a previously inactive entry. */
   2.120 @@ -150,7 +168,7 @@ static void
   2.121  
   2.122              /* NB. prev_scombo is updated in place to seen value. */
   2.123              if ( unlikely(cmpxchg_user((u32 *)&sha->flags,
   2.124 -                                       prev_scombo, 
   2.125 +                                       prev_scombo,
   2.126                                         new_scombo)) )
   2.127                  PIN_FAIL(GNTST_general_error,
   2.128                           "Fault while modifying shared flags and domid.\n");
   2.129 @@ -170,16 +188,18 @@ static void
   2.130          }
   2.131  
   2.132          /* rmb(); */ /* not on x86 */
   2.133 -        frame = sha->frame;
   2.134 -        if ( unlikely(!pfn_is_ram(frame)) || 
   2.135 -             unlikely(!((flags & GNTMAP_readonly) ? 
   2.136 -                        get_page(&frame_table[frame], rd) : 
   2.137 -                        get_page_and_type(&frame_table[frame], rd, 
   2.138 +
   2.139 +        frame = __translate_gpfn_to_mfn(rd, sha->frame);
   2.140 +
   2.141 +        if ( unlikely(!pfn_is_ram(frame)) ||
   2.142 +             unlikely(!((flags & GNTMAP_readonly) ?
   2.143 +                        get_page(&frame_table[frame], rd) :
   2.144 +                        get_page_and_type(&frame_table[frame], rd,
   2.145                                            PGT_writable_page))) )
   2.146          {
   2.147              clear_bit(_GTF_writing, &sha->flags);
   2.148              clear_bit(_GTF_reading, &sha->flags);
   2.149 -            PIN_FAIL(GNTST_general_error, 
   2.150 +            PIN_FAIL(GNTST_general_error,
   2.151                       "Could not pin the granted frame!\n");
   2.152          }
   2.153  
   2.154 @@ -232,7 +252,9 @@ static void
   2.155                  sflags = prev_sflags;
   2.156              }
   2.157  
   2.158 -            if ( unlikely(!get_page_type(&frame_table[act->frame],
   2.159 +            frame = act->frame;
   2.160 +
   2.161 +            if ( unlikely(!get_page_type(&frame_table[frame],
   2.162                                           PGT_writable_page)) )
   2.163              {
   2.164                  clear_bit(_GTF_writing, &sha->flags);
   2.165 @@ -253,34 +275,91 @@ static void
   2.166      ld->grant_table->maptrack[handle].ref_and_flags =
   2.167          (ref << MAPTRACK_REF_SHIFT) | (flags & MAPTRACK_GNTMAP_MASK);
   2.168  
   2.169 +    if ( (host_virt_addr != 0) && (flags & GNTMAP_host_map) )
   2.170 +    {
   2.171 +        /* Write update into the pagetable
   2.172 +         */
   2.173 +        if ( 0 > (rc = update_grant_va_mapping( host_virt_addr,
   2.174 +                                (frame << PAGE_SHIFT) | _PAGE_PRESENT  |
   2.175 +                                                        _PAGE_ACCESSED |
   2.176 +                                                        _PAGE_DIRTY    |
   2.177 +                       ((flags & GNTMAP_readonly) ? 0 : _PAGE_RW),
   2.178 +                       ld, led )) )
   2.179 +        {
   2.180 +            /* Abort. */
   2.181 +            act->pin -= (flags & GNTMAP_readonly) ?
   2.182 +                GNTPIN_hstr_inc : GNTPIN_hstw_inc;
   2.183 +
   2.184 +            if ( flags & GNTMAP_readonly )
   2.185 +                act->pin -= GNTPIN_hstr_inc;
   2.186 +            else
   2.187 +            {
   2.188 +                act->pin -= GNTPIN_hstw_inc;
   2.189 +                if ( (act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask)) == 0 )
   2.190 +                    put_page_type(&frame_table[frame]);
   2.191 +
   2.192 +                if ( act->pin == 0 )
   2.193 +                    put_page(&frame_table[frame]);
   2.194 +            }
   2.195 +            goto fail;
   2.196 +        }
   2.197 +
   2.198 +        if ( rc == GNTUPDVA_prev_ro )
   2.199 +            act->pin -= GNTPIN_hstr_inc;
   2.200 +
   2.201 +        if ( rc == GNTUPDVA_prev_rw ) 
   2.202 +        {
   2.203 +            act->pin -= GNTPIN_hstw_inc;
   2.204 +            put_page_type(&frame_table[frame]);
   2.205 +        }
   2.206 +        rc = 0;
   2.207 +        *va = host_virt_addr;
   2.208 +
   2.209 +        /* IMPORTANT: must flush / invalidate entry in TLB.
   2.210 +         * This is done in the outer gnttab_map_grant_ref when return 0.
   2.211 +         */
   2.212 +    }
   2.213 +
   2.214 +    if ( flags & GNTMAP_device_map )
   2.215 +        (void)__put_user(frame,  &uop->dev_bus_addr);
   2.216 +
   2.217      /* Unchecked and unconditional. */
   2.218      (void)__put_user(handle, &uop->handle);
   2.219 -    (void)__put_user(act->frame,  &uop->dev_bus_addr);
   2.220  
   2.221      spin_unlock(&rd->grant_table->lock);
   2.222      put_domain(rd);
   2.223 -    return;
   2.224 +    return 0;
   2.225  
   2.226   fail:
   2.227      (void)__put_user(rc, &uop->handle);
   2.228      spin_unlock(&rd->grant_table->lock);
   2.229      put_domain(rd);
   2.230 -    put_maptrack_handle(ld->grant_table, handle);
    2.231 +    put_maptrack_handle(ld->grant_table, handle); /* TODO(cwc22): verify the maptrack handle should be released on this failure path */
   2.232 +    return rc;
   2.233  }
   2.234  
   2.235  static long
   2.236  gnttab_map_grant_ref(
   2.237      gnttab_map_grant_ref_t *uop, unsigned int count)
   2.238  {
   2.239 -    int i;
   2.240 +    int i, flush = 0;
   2.241 +    unsigned long va;
   2.242 +
   2.243      for ( i = 0; i < count; i++ )
   2.244 -        __gnttab_map_grant_ref(&uop[i]);
   2.245 +        if ( __gnttab_map_grant_ref(&uop[i], &va) == 0)
   2.246 +            flush++;
   2.247 +
   2.248 +    if ( flush == 1 )
   2.249 +        __flush_tlb_one(va);
   2.250 +    else if ( flush )
   2.251 +        local_flush_tlb();
   2.252      return 0;
   2.253  }
   2.254  
   2.255 -static void
   2.256 +static int
   2.257  __gnttab_unmap_grant_ref(
   2.258 -    gnttab_unmap_grant_ref_t *uop)
   2.259 +    gnttab_unmap_grant_ref_t *uop,
   2.260 +    unsigned long *va)
   2.261  {
   2.262      domid_t        dom;
   2.263      grant_ref_t    ref;
   2.264 @@ -290,7 +369,7 @@ static void
   2.265      active_grant_entry_t *act;
   2.266      grant_entry_t *sha;
   2.267      grant_mapping_t *map;
   2.268 -    s16            rc = 0;
   2.269 +    s16            rc = -EFAULT;
   2.270      unsigned long  frame, virt;
   2.271  
   2.272      ld = current->domain;
   2.273 @@ -301,7 +380,7 @@ static void
   2.274                    __get_user(handle, &uop->handle)) )
   2.275      {
   2.276          DPRINTK("Fault while reading gnttab_unmap_grant_ref_t.\n");
   2.277 -        return; /* don't set status */
   2.278 +        return -EFAULT; /* don't set status */
   2.279      }
   2.280  
   2.281      map = &ld->grant_table->maptrack[handle];
   2.282 @@ -311,7 +390,7 @@ static void
   2.283      {
   2.284          DPRINTK("Bad handle (%d).\n", handle);
   2.285          (void)__put_user(GNTST_bad_handle, &uop->status);
   2.286 -        return;
   2.287 +        return GNTST_bad_handle;
   2.288      }
   2.289  
   2.290      dom = map->domid;
   2.291 @@ -324,7 +403,7 @@ static void
   2.292              put_domain(rd);
   2.293          DPRINTK("Could not find domain %d\n", dom);
   2.294          (void)__put_user(GNTST_bad_domain, &uop->status);
   2.295 -        return;
   2.296 +        return GNTST_bad_domain;
   2.297      }
   2.298      DPRINTK("Unmapping grant ref (%hu) for domain (%hu) with handle (%hu)\n",
   2.299              ref, dom, handle);
   2.300 @@ -348,12 +427,53 @@ static void
   2.301          frame = act->frame;
   2.302      }
   2.303  
   2.304 -    if ( (virt != 0) && (map->ref_and_flags & GNTMAP_host_map) )
   2.305 +    if ( (virt != 0) &&
   2.306 +         (map->ref_and_flags & GNTMAP_host_map) &&
   2.307 +         ((act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask)) > 0))
   2.308      {
   2.309 -        act->pin -= (map->ref_and_flags & GNTMAP_readonly) ?
   2.310 -            GNTPIN_hstr_inc : GNTPIN_hstw_inc;
   2.311 +        l1_pgentry_t   *pl1e;
   2.312 +        unsigned long   _ol1e;
   2.313 +
   2.314 +        pl1e = &linear_pg_table[l1_linear_offset(virt)];
   2.315 +                                                                                            
   2.316 +        if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
   2.317 +        {
   2.318 +            DPRINTK("Could not find PTE entry for address %x\n", virt);
   2.319 +            rc = -EINVAL;
   2.320 +            goto fail;
   2.321 +        }
   2.322 +
   2.323 +        /* check that the virtual address supplied is actually
   2.324 +         * mapped to act->frame.
   2.325 +         */
   2.326 +        if ( unlikely((_ol1e >> PAGE_SHIFT) != frame ))
   2.327 +        {
   2.328 +            DPRINTK("PTE entry %x for address %x doesn't match frame %x\n",
   2.329 +                    _ol1e, virt, frame);
   2.330 +            rc = -EINVAL;
   2.331 +            goto fail;
   2.332 +        }
   2.333 +
   2.334 +        /* This code _requires_ that the act->pin bits are updated
   2.335 +         * if a mapping is ever switched between RO and RW.
   2.336 +         */
   2.337 +        act->pin -= ( _ol1e & _PAGE_RW ) ? GNTPIN_hstw_inc
   2.338 +                                         : GNTPIN_hstr_inc;
   2.339 +
   2.340 +        /* Delete pagetable entry
   2.341 +         */
   2.342 +        if ( unlikely(__put_user(0, (unsigned long *)pl1e)))
   2.343 +        {
   2.344 +            DPRINTK("Cannot delete PTE entry at %x for virtual address %x\n",
   2.345 +                    pl1e, virt);
   2.346 +            rc = -EINVAL;
   2.347 +            goto fail;
   2.348 +        }
   2.349 +        rc = 0;
   2.350 +        *va = virt;
   2.351      }
   2.352  
   2.353 +    /* If the last writable mapping has been removed, put_page_type */
   2.354      if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
   2.355                !(map->ref_and_flags & GNTMAP_readonly) )
   2.356      {
   2.357 @@ -371,15 +491,24 @@ static void
   2.358      (void)__put_user(rc, &uop->status);
   2.359      spin_unlock(&rd->grant_table->lock);
   2.360      put_domain(rd);
   2.361 +    return rc;
   2.362  }
   2.363  
   2.364  static long
   2.365  gnttab_unmap_grant_ref(
   2.366      gnttab_unmap_grant_ref_t *uop, unsigned int count)
   2.367  {
   2.368 -    int i;
   2.369 +    int i, flush = 0;
   2.370 +    unsigned long va = 0;
   2.371 +
   2.372      for ( i = 0; i < count; i++ )
   2.373 -        __gnttab_unmap_grant_ref(&uop[i]);
   2.374 +        if ( __gnttab_unmap_grant_ref(&uop[i], &va) == 0)
   2.375 +            flush++;
   2.376 +
   2.377 +    if ( flush == 1 )
   2.378 +        __flush_tlb_one(va);
   2.379 +    else if ( flush )
   2.380 +        local_flush_tlb();
   2.381      return 0;
   2.382  }
   2.383  
   2.384 @@ -570,6 +699,11 @@ int
   2.385  gnttab_check_unmap(
   2.386      struct domain *rd, struct domain *ld, unsigned long frame, int readonly)
   2.387  {
   2.388 +    /* TODO: beat the caller around the head with a brick.
   2.389 +     *       have to walk the grant tables to find this thing.
   2.390 +     */
   2.391 +    DPRINTK("gnttab_check_unmap remote dom(%d) local dom(%d) frame (%x) flags(%x).\n",
   2.392 +            rd->id, ld->id, frame, readonly);
   2.393      return 0;
   2.394  }
   2.395  
   2.396 @@ -646,8 +780,17 @@ gnttab_prepare_for_transfer(
   2.397  
   2.398  void 
   2.399  gnttab_notify_transfer(
   2.400 -    struct domain *rd, grant_ref_t ref, unsigned long frame)
   2.401 +    struct domain *rd, grant_ref_t ref, unsigned long sframe)
   2.402  {
   2.403 +    unsigned long frame;
   2.404 +
   2.405 +    /* cwc22
   2.406 +     * TODO: this requires that the machine_to_phys_mapping
   2.407 +     *       has already been updated, so the accept_transfer hypercall
   2.408 +     *       must do this.
   2.409 +     */
   2.410 +    frame = __mfn_to_gpfn(rd, sframe);
   2.411 +
   2.412      wmb(); /* Ensure that the reassignment is globally visible. */
   2.413      rd->grant_table->shared[ref].frame = frame;
   2.414  }
     3.1 --- a/xen/include/asm-x86/mm.h	Fri Mar 04 11:04:09 2005 +0000
     3.2 +++ b/xen/include/asm-x86/mm.h	Tue Mar 08 16:12:54 2005 +0000
     3.3 @@ -341,4 +341,14 @@ void audit_domains(void);
     3.4  
     3.5  void propagate_page_fault(unsigned long addr, u16 error_code);
     3.6  
     3.7 +/* update_grant_va_mapping
     3.8 + * Caller must own d's BIGLOCK, is responsible for flushing the TLB,
     3.9 + * and have already get_page'd */
    3.10 +int update_grant_va_mapping(unsigned long va,
    3.11 +                            unsigned long val,
    3.12 +                            struct domain *d,
    3.13 +                            struct exec_domain *ed);
    3.14 +#define GNTUPDVA_prev_ro 1
    3.15 +#define GNTUPDVA_prev_rw 2
    3.16 +
    3.17  #endif /* __ASM_X86_MM_H__ */
     4.1 --- a/xen/include/asm-x86/shadow.h	Fri Mar 04 11:04:09 2005 +0000
     4.2 +++ b/xen/include/asm-x86/shadow.h	Tue Mar 08 16:12:54 2005 +0000
     4.3 @@ -56,6 +56,62 @@ extern void vmx_shadow_clear_state(struc
     4.4        ? phys_to_machine_mapping(gpfn)                  \
     4.5        : (gpfn) )
     4.6  
     4.7 +#define __translate_gpfn_to_mfn(_d, gpfn)              \
     4.8 +    ( (shadow_mode_translate(_d))                      \
     4.9 +      ? translate_gpfn_to_mfn(_d, gpfn)                \
    4.10 +      : (gpfn) )
    4.11 +
    4.12 +static inline unsigned long
    4.13 +translate_gpfn_to_mfn(struct domain *rd, unsigned long gpfn)
    4.14 +{
    4.15 +    unsigned long       ma_of_phys_to_mach;
    4.16 +    l2_pgentry_t       *l2_table;
    4.17 +    l2_pgentry_t        l2_entry;
    4.18 +    unsigned long       ma_of_l1_table;
    4.19 +    l1_pgentry_t       *l1_table;
    4.20 +    l1_pgentry_t        pte;
    4.21 +    unsigned long       mfn = 0;
    4.22 +
    4.23 +    /*
    4.24 +     * translation of: (domain, gpfn) -> mfn
    4.25 +     * where domain != current, and is in translate shadow mode
    4.26 +     */
    4.27 +
    4.28 +    ASSERT( shadow_mode_translate(rd) );
    4.29 +
    4.30 +    shadow_lock(rd);
    4.31 +
    4.32 +    /* TODO: check using shadow_lock is correct
    4.33 +     * TODO: move arch.phys_table from exec_domain to domain
    4.34 +     *       - use of zero index is a hack - FIXME
    4.35 +     */
    4.36 +
    4.37 +    ma_of_phys_to_mach = pagetable_val( (rd->exec_domain[0])->arch.phys_table );
    4.38 +
    4.39 +    l2_table = (l2_pgentry_t *) map_domain_mem( ma_of_phys_to_mach );
    4.40 +    l2_entry = l2_table[ gpfn >> (L2_PAGETABLE_SHIFT - PAGE_SHIFT) ];
    4.41 +
    4.42 +    unmap_domain_mem( l2_table );
    4.43 +
    4.44 +    if ( l2_pgentry_val(l2_entry) == 0 )
    4.45 +        goto unlock_out;
    4.46 +
    4.47 +    ma_of_l1_table = l2_pgentry_to_phys( l2_entry );
    4.48 +
    4.49 +    l1_table = (l1_pgentry_t *) map_domain_mem( ma_of_l1_table );
    4.50 +    pte      = l1_table[ (gpfn >> (L1_PAGETABLE_SHIFT - PAGE_SHIFT)) &
    4.51 +                         (L1_PAGETABLE_ENTRIES - 1 ) ];
    4.52 +
    4.53 +    unmap_domain_mem( l1_table );
    4.54 +
    4.55 +    mfn = l1_pgentry_to_pfn(pte);
    4.56 +
    4.57 +unlock_out:
    4.58 +    shadow_unlock(rd);
    4.59 +
    4.60 +    return mfn;
    4.61 +}
    4.62 +
    4.63  extern void __shadow_mode_disable(struct domain *d);
    4.64  static inline void shadow_mode_disable(struct domain *d)
    4.65  {
     5.1 --- a/xen/include/public/grant_table.h	Fri Mar 04 11:04:09 2005 +0000
     5.2 +++ b/xen/include/public/grant_table.h	Tue Mar 08 16:12:54 2005 +0000
     5.3 @@ -246,9 +246,10 @@ typedef struct {
     5.4  #define GNTST_general_error    (-1) /* General undefined error.              */
     5.5  #define GNTST_bad_domain       (-2) /* Unrecognsed domain id.                */
     5.6  #define GNTST_bad_gntref       (-3) /* Unrecognised or inappropriate gntref. */
     5.7 -#define GNTST_bad_handle       (-3) /* Unrecognised or inappropriate handle. */
     5.8 -#define GNTST_no_device_space  (-4) /* Out of space in I/O MMU.              */
     5.9 -#define GNTST_permission_denied (-5) /* Not enough privilege for operation.  */
    5.10 +#define GNTST_bad_handle       (-4) /* Unrecognised or inappropriate handle. */
    5.11 +#define GNTST_bad_virt_addr    (-5) /* Inappropriate virtual address to map. */
    5.12 +#define GNTST_no_device_space  (-6) /* Out of space in I/O MMU.              */
    5.13 +#define GNTST_permission_denied (-7) /* Not enough privilege for operation.  */
    5.14  
    5.15  #define GNTTABOP_error_msgs {                   \
    5.16      "okay",                                     \
    5.17 @@ -256,6 +257,7 @@ typedef struct {
    5.18      "unrecognised domain id",                   \
    5.19      "invalid grant reference",                  \
    5.20      "invalid mapping handle",                   \
    5.21 +    "invalid virtual address",                  \
    5.22      "no spare translation slot in the I/O MMU", \
    5.23      "permission denied"                         \
    5.24  }