direct-io.hg

changeset 6150:dfd2ded7b712

Some arch/x86 gnttab cleanups in Xen.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Aug 14 17:32:30 2005 +0000 (2005-08-14)
parents 3fe7b0b7f6c5
children c1bcea912992
files xen/arch/x86/mm.c xen/common/grant_table.c xen/include/asm-x86/mm.h xen/include/public/grant_table.h xen/include/xen/grant_table.h
line diff
     1.1 --- a/xen/arch/x86/mm.c	Sun Aug 14 09:17:54 2005 +0000
     1.2 +++ b/xen/arch/x86/mm.c	Sun Aug 14 17:32:30 2005 +0000
     1.3 @@ -2269,36 +2269,20 @@ int do_mmu_update(
     1.4  }
     1.5  
     1.6  
     1.7 -int update_grant_va_mapping_pte(unsigned long pte_addr,
     1.8 -                                l1_pgentry_t _nl1e, 
     1.9 -                                struct domain *d,
    1.10 -                                struct vcpu *v)
    1.11 +int update_grant_pte_mapping(
    1.12 +    unsigned long pte_addr, l1_pgentry_t _nl1e, 
    1.13 +    struct domain *d, struct vcpu *v)
    1.14  {
    1.15 -    /* Caller must:
    1.16 -     * . own d's BIGLOCK 
    1.17 -     * . already have 'get_page' correctly on the to-be-installed nl1e
    1.18 -     * . be responsible for flushing the TLB
    1.19 -     * . check PTE being installed isn't DISALLOWED
    1.20 -     */
    1.21 -
    1.22      int rc = GNTST_okay;
    1.23      void *va;
    1.24      unsigned long gpfn, mfn;
    1.25      struct pfn_info *page;
    1.26 -    struct domain_mmap_cache mapcache, sh_mapcache;
    1.27      u32 type_info;
    1.28 -    l1_pgentry_t    ol1e;
    1.29 -
    1.30 -    /* Grant tables and shadow mode don't currently work together. */
    1.31 -    ASSERT( !shadow_mode_refcounts(d) );
    1.32 -
    1.33 -    /* There shouldn't be any strange bits set on the PTE. */
    1.34 -    ASSERT( (l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
    1.35 -
    1.36 -    cleanup_writable_pagetable(d);
    1.37 -
    1.38 -    domain_mmap_cache_init(&mapcache);
    1.39 -    domain_mmap_cache_init(&sh_mapcache);
    1.40 +    l1_pgentry_t ol1e;
    1.41 +
    1.42 +    ASSERT(spin_is_locked(&d->big_lock));
    1.43 +    ASSERT(!shadow_mode_refcounts(d));
    1.44 +    ASSERT((l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
    1.45  
    1.46      gpfn = pte_addr >> PAGE_SHIFT;
    1.47      mfn = __gpfn_to_mfn(d, gpfn);
    1.48 @@ -2306,88 +2290,61 @@ int update_grant_va_mapping_pte(unsigned
    1.49      if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
    1.50      {
    1.51          MEM_LOG("Could not get page for normal update");
    1.52 -        rc = -EINVAL;
    1.53 -        goto failed_norefs;
    1.54 +        return GNTST_general_error;
    1.55      }
    1.56      
    1.57 -    va = map_domain_page_with_cache(mfn, &mapcache);
    1.58 -    va = (void *)((unsigned long)va +
    1.59 -                  (unsigned long)(pte_addr & ~PAGE_MASK));
    1.60 -    page = &frame_table[mfn];
    1.61 +    va = map_domain_page(mfn);
    1.62 +    va = (void *)((unsigned long)va + (pte_addr & ~PAGE_MASK));
    1.63 +    page = pfn_to_page(mfn);
    1.64  
    1.65      type_info = page->u.inuse.type_info;
    1.66 -    if ( (type_info & PGT_type_mask) != PGT_l1_page_table) {
    1.67 +    if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
    1.68 +         !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) )
    1.69 +    {
    1.70          DPRINTK("Grant map attempted to update a non-L1 page\n");
    1.71 -        rc = -EINVAL;
    1.72 +        rc = GNTST_general_error;
    1.73          goto failed;
    1.74      }
    1.75  
    1.76 -    if ( likely(get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask))) )
    1.77 +    if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) ||
    1.78 +         !update_l1e(va, ol1e, _nl1e) )
    1.79      {
    1.80 -
    1.81 -        if ( unlikely(__copy_from_user(&ol1e, (l1_pgentry_t *)va, 
    1.82 -                                       sizeof(ol1e)) != 0) ) {
    1.83 -            put_page_type(page);
    1.84 -            rc = -EINVAL;
    1.85 -            goto failed;
    1.86 -        } 
    1.87 -
    1.88 -        if ( update_l1e(va, ol1e, _nl1e) )
    1.89 -        {
    1.90 -            put_page_from_l1e(ol1e, d);
    1.91 -
    1.92 -            if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
    1.93 -                rc = GNTST_flush_all; /* We don't know what vaddr to flush */
    1.94 -            else
    1.95 -                rc = GNTST_okay; /* Caller need not invalidate TLB entry */
    1.96 -
    1.97 -            if (  unlikely(shadow_mode_enabled(d)) )
    1.98 -                shadow_l1_normal_pt_update(d, pte_addr, _nl1e, &sh_mapcache);
    1.99 -        }
   1.100 -        else
   1.101 -            rc = -EINVAL;
   1.102 +        put_page_type(page);
   1.103 +        rc = GNTST_general_error;
   1.104 +        goto failed;
   1.105 +    } 
   1.106 +
   1.107 +    put_page_from_l1e(ol1e, d);
   1.108 +
   1.109 +    rc = (l1e_get_flags(ol1e) & _PAGE_PRESENT) ? GNTST_flush_all : GNTST_okay;
   1.110 +
   1.111 +    if ( unlikely(shadow_mode_enabled(d)) )
   1.112 +    {
   1.113 +        struct domain_mmap_cache sh_mapcache;
   1.114 +        domain_mmap_cache_init(&sh_mapcache);
   1.115 +        shadow_l1_normal_pt_update(d, pte_addr, _nl1e, &sh_mapcache);
   1.116 +        domain_mmap_cache_destroy(&sh_mapcache);
   1.117 +    }
   1.118 +
   1.119 +    put_page_type(page);
   1.120   
   1.121 -        put_page_type(page);
   1.122 -    }
   1.123 -
   1.124   failed:
   1.125 -    unmap_domain_page_with_cache(va, &mapcache);
   1.126 +    unmap_domain_page(va);
   1.127      put_page(page);
   1.128 -
   1.129 - failed_norefs:
   1.130 -    domain_mmap_cache_destroy(&mapcache);
   1.131 -    domain_mmap_cache_destroy(&sh_mapcache);
   1.132 -
   1.133      return rc;
   1.134  }
   1.135  
   1.136 -
   1.137 -
   1.138 -int clear_grant_va_mapping_pte(unsigned long addr, unsigned long frame,
   1.139 -                               struct domain *d)
   1.140 +int clear_grant_pte_mapping(
   1.141 +    unsigned long addr, unsigned long frame, struct domain *d)
   1.142  {
   1.143 -    /* Caller must:
   1.144 -     * . own d's BIGLOCK 
   1.145 -     * . already have 'get_page' correctly on the to-be-installed nl1e
   1.146 -     * . be responsible for flushing the TLB
   1.147 -     * . check PTE being installed isn't DISALLOWED
   1.148 -     */
   1.149 -
   1.150      int rc = GNTST_okay;
   1.151      void *va;
   1.152      unsigned long gpfn, mfn;
   1.153      struct pfn_info *page;
   1.154 -    struct domain_mmap_cache mapcache, sh_mapcache;
   1.155      u32 type_info;
   1.156 -    l1_pgentry_t    ol1e;
   1.157 -
   1.158 -    /* Grant tables and shadow mode don't work together. */
   1.159 -    ASSERT( !shadow_mode_refcounts(d) );
   1.160 -
   1.161 -    cleanup_writable_pagetable(d);
   1.162 -
   1.163 -    domain_mmap_cache_init(&mapcache);
   1.164 -    domain_mmap_cache_init(&sh_mapcache);
   1.165 +    l1_pgentry_t ol1e;
   1.166 +
   1.167 +    ASSERT(!shadow_mode_refcounts(d));
   1.168  
   1.169      gpfn = addr >> PAGE_SHIFT;
   1.170      mfn = __gpfn_to_mfn(d, gpfn);
   1.171 @@ -2395,119 +2352,91 @@ int clear_grant_va_mapping_pte(unsigned 
   1.172      if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
   1.173      {
   1.174          MEM_LOG("Could not get page for normal update");
   1.175 -        rc = -EINVAL;
   1.176 -        goto failed_norefs;
   1.177 +        return GNTST_general_error;
   1.178      }
   1.179      
   1.180 -    va = map_domain_page_with_cache(mfn, &mapcache);
   1.181 -    va = (void *)((unsigned long)va +
   1.182 -                  (unsigned long)(addr & ~PAGE_MASK));
   1.183 -    page = &frame_table[mfn];
   1.184 +    va = map_domain_page(mfn);
   1.185 +    va = (void *)((unsigned long)va + (addr & ~PAGE_MASK));
   1.186 +    page = pfn_to_page(mfn);
   1.187  
   1.188      type_info = page->u.inuse.type_info;
   1.189 -    if ( (type_info & PGT_type_mask) != PGT_l1_page_table) {
   1.190 +    if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
   1.191 +         !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) )
   1.192 +    {
   1.193          DPRINTK("Grant map attempted to update a non-L1 page\n");
   1.194 -        rc = -EINVAL;
   1.195 +        rc = GNTST_general_error;
   1.196 +        goto failed;
   1.197 +    }
   1.198 +
   1.199 +    if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) )
   1.200 +    {
   1.201 +        put_page_type(page);
   1.202 +        rc = GNTST_general_error;
   1.203 +        goto failed;
   1.204 +    }
   1.205 +    
   1.206 +    /* Check that the virtual address supplied is actually mapped to frame. */
   1.207 +    if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame) )
   1.208 +    {
   1.209 +        DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
   1.210 +                (unsigned long)l1e_get_intpte(ol1e), addr, frame);
   1.211 +        put_page_type(page);
   1.212 +        rc = GNTST_general_error;
   1.213          goto failed;
   1.214      }
   1.215  
   1.216 -    if ( likely(get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask))) )
   1.217 +    /* Delete pagetable entry. */
   1.218 +    if ( unlikely(__put_user(0, (unsigned long *)va)))
   1.219      {
   1.220 -        if ( unlikely(__copy_from_user(&ol1e, (l1_pgentry_t *)va, 
   1.221 -                                       sizeof(ol1e)) != 0) ) 
   1.222 -        {
   1.223 -            rc = -EINVAL;
   1.224 -            put_page_type(page);
   1.225 -            goto failed;
   1.226 -        }
   1.227 -    
   1.228 -        /*
   1.229 -         * Check that the virtual address supplied is actually mapped to frame.
   1.230 -         */
   1.231 -        if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame ))
   1.232 -        {
   1.233 -            DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
   1.234 -                    (unsigned long)l1e_get_intpte(ol1e), addr, frame);
   1.235 -            rc =  -EINVAL;
   1.236 -            put_page_type(page);
   1.237 -            goto failed;
   1.238 -        }
   1.239 -
   1.240 -        /* Delete pagetable entry. */
   1.241 -        if ( unlikely(__put_user(0, (unsigned long *)va)))
   1.242 -        {
   1.243 -            DPRINTK("Cannot delete PTE entry at %p.\n", va);
   1.244 -            rc = -EINVAL;
   1.245 -        } else {
   1.246 -            if ( unlikely(shadow_mode_enabled(d)) )
   1.247 -                shadow_l1_normal_pt_update(d, addr, l1e_empty(), 
   1.248 -                                           &sh_mapcache);
   1.249 -        }
   1.250 +        DPRINTK("Cannot delete PTE entry at %p.\n", va);
   1.251          put_page_type(page);
   1.252 +        rc = GNTST_general_error;
   1.253 +        goto failed;
   1.254      }
   1.255  
   1.256 +    if ( unlikely(shadow_mode_enabled(d)) )
   1.257 +    {
   1.258 +        struct domain_mmap_cache sh_mapcache;
   1.259 +        domain_mmap_cache_init(&sh_mapcache);
   1.260 +        shadow_l1_normal_pt_update(d, addr, l1e_empty(), &sh_mapcache);
   1.261 +        domain_mmap_cache_destroy(&sh_mapcache);
   1.262 +    }
   1.263 +
   1.264 +    put_page_type(page);
   1.265 +
   1.266   failed:
   1.267 -    unmap_domain_page_with_cache(va, &mapcache);
   1.268 +    unmap_domain_page(va);
   1.269      put_page(page);
   1.270 -
   1.271 - failed_norefs:
   1.272 -    domain_mmap_cache_destroy(&mapcache);
   1.273 -    domain_mmap_cache_destroy(&sh_mapcache);
   1.274 -
   1.275      return rc;
   1.276  }
   1.277  
   1.278  
   1.279 -
   1.280 -/* This function assumes the caller is holding the domain's BIGLOCK
   1.281 - * and is running in a shadow mode
   1.282 - */
   1.283 -int update_grant_va_mapping(unsigned long va,
   1.284 -                            l1_pgentry_t _nl1e, 
   1.285 -                            struct domain *d,
   1.286 -                            struct vcpu *v)
   1.287 +int update_grant_va_mapping(
   1.288 +    unsigned long va, l1_pgentry_t _nl1e, struct domain *d, struct vcpu *v)
   1.289  {
   1.290 -    /* Caller must:
   1.291 -     * . own d's BIGLOCK 
   1.292 -     * . already have 'get_page' correctly on the to-be-installed nl1e
   1.293 -     * . be responsible for flushing the TLB
   1.294 -     * . check PTE being installed isn't DISALLOWED
   1.295 +    int rc = GNTST_okay;
   1.296 +    l1_pgentry_t *pl1e, ol1e;
   1.297 +    
   1.298 +    ASSERT(spin_is_locked(&d->big_lock));
   1.299 +    ASSERT(!shadow_mode_refcounts(d));
   1.300 +    ASSERT((l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
   1.301 +
   1.302 +    /*
   1.303 +     * This is actually overkill - we don't need to sync the L1 itself,
   1.304 +     * just everything involved in getting to this L1 (i.e. we need
   1.305 +     * linear_pg_table[l1_linear_offset(va)] to be in sync)...
   1.306       */
   1.307 -
   1.308 -    int             rc = GNTST_okay;
   1.309 -    l1_pgentry_t   *pl1e;
   1.310 -    l1_pgentry_t    ol1e;
   1.311 -    
   1.312 -    cleanup_writable_pagetable(d);
   1.313 -
   1.314 -    // This is actually overkill - we don't need to sync the L1 itself,
   1.315 -    // just everything involved in getting to this L1 (i.e. we need
   1.316 -    // linear_pg_table[l1_linear_offset(va)] to be in sync)...
   1.317 -    //
   1.318      __shadow_sync_va(v, va);
   1.319  
   1.320      pl1e = &linear_pg_table[l1_linear_offset(va)];
   1.321  
   1.322 -    if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
   1.323 -        rc = -EINVAL;
   1.324 -    else if ( !shadow_mode_refcounts(d) )
   1.325 -    {
   1.326 -        if ( update_l1e(pl1e, ol1e, _nl1e) )
   1.327 -        {
   1.328 -            put_page_from_l1e(ol1e, d);
   1.329 -            if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
   1.330 -                rc = GNTST_flush_one;
   1.331 -            else
   1.332 -                rc = GNTST_okay; /* Caller need not invalidate TLB entry */
   1.333 -        }
   1.334 -        else
   1.335 -            rc = -EINVAL;
   1.336 -    }
   1.337 -    else
   1.338 -    {
   1.339 -        printk("grant tables and shadow mode currently don't work together\n");
   1.340 -        BUG();
   1.341 -    }
   1.342 +    if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) ||
   1.343 +         !update_l1e(pl1e, ol1e, _nl1e) )
   1.344 +        return GNTST_general_error;
   1.345 +
   1.346 +    put_page_from_l1e(ol1e, d);
   1.347 +
   1.348 +    rc = (l1e_get_flags(ol1e) & _PAGE_PRESENT) ? GNTST_flush_one : GNTST_okay;
   1.349  
   1.350      if ( unlikely(shadow_mode_enabled(d)) )
   1.351          shadow_do_update_va_mapping(va, _nl1e, v);
   1.352 @@ -2517,15 +2446,15 @@ int update_grant_va_mapping(unsigned lon
   1.353  
   1.354  int clear_grant_va_mapping(unsigned long addr, unsigned long frame)
   1.355  {
   1.356 -    l1_pgentry_t   *pl1e;
   1.357 -    unsigned long   _ol1e;
   1.358 +    l1_pgentry_t *pl1e;
   1.359 +    unsigned long _ol1e;
   1.360      
   1.361      pl1e = &linear_pg_table[l1_linear_offset(addr)];
   1.362  
   1.363      if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
   1.364      {
   1.365          DPRINTK("Could not find PTE entry for address %lx\n", addr);
   1.366 -        return -EINVAL;
   1.367 +        return GNTST_general_error;
   1.368      }
   1.369  
   1.370      /*
   1.371 @@ -2536,14 +2465,14 @@ int clear_grant_va_mapping(unsigned long
   1.372      {
   1.373          DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
   1.374                  _ol1e, addr, frame);
   1.375 -        return -EINVAL;
   1.376 +        return GNTST_general_error;
   1.377      }
   1.378  
   1.379      /* Delete pagetable entry. */
   1.380      if ( unlikely(__put_user(0, (unsigned long *)pl1e)))
   1.381      {
   1.382          DPRINTK("Cannot delete PTE entry at %p.\n", (unsigned long *)pl1e);
   1.383 -        return -EINVAL;
   1.384 +        return GNTST_general_error;
   1.385      }
   1.386      
   1.387      return 0;
   1.388 @@ -2583,10 +2512,11 @@ int do_update_va_mapping(unsigned long v
   1.389                        (shadow_mode_translate(d) ||
   1.390                         shadow_mode_translate(percpu_info[cpu].foreign))) )
   1.391          {
   1.392 -            // The foreign domain's pfn's are in a different namespace.
   1.393 -            // There's not enough information in just a gpte to figure out
   1.394 -            // how to (re-)shadow this entry.
   1.395 -            //
   1.396 +            /*
   1.397 +             * The foreign domain's pfn's are in a different namespace. There's
   1.398 +             * not enough information in just a gpte to figure out how to
   1.399 +             * (re-)shadow this entry.
   1.400 +             */
   1.401              domain_crash();
   1.402          }
   1.403      
   1.404 @@ -3054,7 +2984,7 @@ void ptwr_flush(struct domain *d, const 
   1.405           */
   1.406          BUG();
   1.407      }
   1.408 -    PTWR_PRINTK("[%c] disconnected_l1va at %p is %lx\n",
   1.409 +    PTWR_PRINTK("[%c] disconnected_l1va at %p is %"PRIpte"\n",
   1.410                  PTWR_PRINT_WHICH, ptep, pte.l1);
   1.411      l1e_remove_flags(pte, _PAGE_RW);
   1.412  
   1.413 @@ -3072,7 +3002,7 @@ void ptwr_flush(struct domain *d, const 
   1.414      /* Ensure that there are no stale writable mappings in any TLB. */
   1.415      /* NB. INVLPG is a serialising instruction: flushes pending updates. */
   1.416      flush_tlb_one_mask(d->cpumask, l1va);
   1.417 -    PTWR_PRINTK("[%c] disconnected_l1va at %p now %lx\n",
   1.418 +    PTWR_PRINTK("[%c] disconnected_l1va at %p now %"PRIpte"\n",
   1.419                  PTWR_PRINT_WHICH, ptep, pte.l1);
   1.420  
   1.421      /*
     2.1 --- a/xen/common/grant_table.c	Sun Aug 14 09:17:54 2005 +0000
     2.2 +++ b/xen/common/grant_table.c	Sun Aug 14 17:32:30 2005 +0000
     2.3 @@ -266,7 +266,6 @@ static int
     2.4  
     2.5      spin_unlock(&granting_d->grant_table->lock);
     2.6  
     2.7 -
     2.8      if ( (addr != 0) && (dev_hst_ro_flags & GNTMAP_host_map) )
     2.9      {
    2.10          /* Write update into the pagetable. */
    2.11 @@ -278,14 +277,10 @@ static int
    2.12          if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
    2.13              l1e_add_flags(pte,_PAGE_RW);
    2.14  
    2.15 -        if (!(dev_hst_ro_flags & GNTMAP_contains_pte))
    2.16 -        {
    2.17 -            rc = update_grant_va_mapping( addr, pte, 
    2.18 -                                          mapping_d, mapping_ed );
    2.19 -        } else {
    2.20 -            rc = update_grant_va_mapping_pte( addr, pte, 
    2.21 -                                              mapping_d, mapping_ed );
    2.22 -        }
    2.23 +        if ( dev_hst_ro_flags & GNTMAP_contains_pte )
    2.24 +            rc = update_grant_pte_mapping(addr, pte, mapping_d, mapping_ed);
    2.25 +        else
    2.26 +            rc = update_grant_va_mapping(addr, pte, mapping_d, mapping_ed);
    2.27  
    2.28          /* IMPORTANT: rc indicates the degree of TLB flush that is required.
    2.29           * GNTST_flush_one (1) or GNTST_flush_all (2). This is done in the 
    2.30 @@ -586,11 +581,13 @@ static int
    2.31           (flags & GNTMAP_host_map) &&
    2.32           ((act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask)) > 0))
    2.33      {
    2.34 -        if (flags & GNTMAP_contains_pte) 
    2.35 +        if ( flags & GNTMAP_contains_pte )
    2.36          {
    2.37 -            if ( (rc = clear_grant_va_mapping_pte(addr, frame, ld)) < 0 )
    2.38 +            if ( (rc = clear_grant_pte_mapping(addr, frame, ld)) < 0 )
    2.39                  goto unmap_out;
    2.40 -        } else {
    2.41 +        }
    2.42 +        else
    2.43 +        {
    2.44              if ( (rc = clear_grant_va_mapping(addr, frame)) < 0 )
    2.45                  goto unmap_out;
    2.46          }
    2.47 @@ -961,11 +958,14 @@ do_grant_table_op(
    2.48      unsigned int cmd, void *uop, unsigned int count)
    2.49  {
    2.50      long rc;
    2.51 +    struct domain *d = current->domain;
    2.52  
    2.53      if ( count > 512 )
    2.54          return -EINVAL;
    2.55  
    2.56 -    LOCK_BIGLOCK(current->domain);
    2.57 +    LOCK_BIGLOCK(d);
    2.58 +
    2.59 +    sync_pagetable_state(d);
    2.60  
    2.61      rc = -EFAULT;
    2.62      switch ( cmd )
    2.63 @@ -1001,7 +1001,7 @@ do_grant_table_op(
    2.64      }
    2.65  
    2.66  out:
    2.67 -    UNLOCK_BIGLOCK(current->domain);
    2.68 +    UNLOCK_BIGLOCK(d);
    2.69  
    2.70      return rc;
    2.71  }
     3.1 --- a/xen/include/asm-x86/mm.h	Sun Aug 14 09:17:54 2005 +0000
     3.2 +++ b/xen/include/asm-x86/mm.h	Sun Aug 14 17:32:30 2005 +0000
     3.3 @@ -377,17 +377,14 @@ void propagate_page_fault(unsigned long 
     3.4   * Caller must own d's BIGLOCK, is responsible for flushing the TLB, and must 
     3.5   * hold a reference to the page.
     3.6   */
     3.7 -int update_grant_va_mapping(unsigned long va,
     3.8 -                            l1_pgentry_t _nl1e, 
     3.9 -                            struct domain *d,
    3.10 -                            struct vcpu *v);
    3.11 -int update_grant_va_mapping_pte(unsigned long pte_addr,
    3.12 -                            l1_pgentry_t _nl1e, 
    3.13 -                            struct domain *d,
    3.14 -                            struct vcpu *v);
    3.15 -
    3.16 +int update_grant_va_mapping(
    3.17 +    unsigned long va, l1_pgentry_t _nl1e, 
    3.18 +    struct domain *d, struct vcpu *v);
    3.19 +int update_grant_pte_mapping(
    3.20 +    unsigned long pte_addr, l1_pgentry_t _nl1e, 
    3.21 +    struct domain *d, struct vcpu *v);
    3.22  int clear_grant_va_mapping(unsigned long addr, unsigned long frame);
    3.23 -int clear_grant_va_mapping_pte(unsigned long addr, unsigned long frame,
    3.24 -                            struct domain *d);
    3.25 +int clear_grant_pte_mapping(
    3.26 +    unsigned long addr, unsigned long frame, struct domain *d);
    3.27  
    3.28  #endif /* __ASM_X86_MM_H__ */
     4.1 --- a/xen/include/public/grant_table.h	Sun Aug 14 09:17:54 2005 +0000
     4.2 +++ b/xen/include/public/grant_table.h	Sun Aug 14 17:32:30 2005 +0000
     4.3 @@ -261,8 +261,6 @@ typedef struct {
     4.4  /*
     4.5   * Values for error status returns. All errors are -ve.
     4.6   */
     4.7 -#define GNTST_flush_all        (2)  /* Success, need to flush entire TLB.    */
     4.8 -#define GNTST_flush_one        (1)  /* Success, need to flush a vaddr.       */
     4.9  #define GNTST_okay             (0)  /* Normal return.                        */
    4.10  #define GNTST_general_error    (-1) /* General undefined error.              */
    4.11  #define GNTST_bad_domain       (-2) /* Unrecognsed domain id.                */
     5.1 --- a/xen/include/xen/grant_table.h	Sun Aug 14 09:17:54 2005 +0000
     5.2 +++ b/xen/include/xen/grant_table.h	Sun Aug 14 17:32:30 2005 +0000
     5.3 @@ -53,8 +53,8 @@ typedef struct {
     5.4  
     5.5  #define ORDER_GRANT_FRAMES   2
     5.6  #define NR_GRANT_FRAMES      (1U << ORDER_GRANT_FRAMES)
     5.7 -#define NR_GRANT_ENTRIES     (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
     5.8 -
     5.9 +#define NR_GRANT_ENTRIES     \
    5.10 +    ((NR_GRANT_FRAMES << PAGE_SHIFT) / sizeof(grant_entry_t))
    5.11  
    5.12  /*
    5.13   * Tracks a mapping of another domain's grant reference. Each domain has a
    5.14 @@ -65,8 +65,8 @@ typedef struct {
    5.15      domid_t  domid;         /* granting domain */
    5.16  } grant_mapping_t;
    5.17  #define MAPTRACK_GNTMAP_MASK  0x1f
    5.18 -#define MAPTRACK_REF_SHIFT       5
    5.19 -#define MAPTRACK_MAX_ENTRIES ( 1 << (16 - MAPTRACK_REF_SHIFT) )
    5.20 +#define MAPTRACK_REF_SHIFT    5
    5.21 +#define MAPTRACK_MAX_ENTRIES  (1 << (16 - MAPTRACK_REF_SHIFT))
    5.22  
    5.23  /* Per-domain grant information. */
    5.24  typedef struct {
    5.25 @@ -109,10 +109,15 @@ gnttab_prepare_for_transfer(
    5.26  /* Notify 'rd' of a completed transfer via an already-locked grant entry. */
    5.27  void 
    5.28  gnttab_notify_transfer(
    5.29 -    struct domain *rd, struct domain *ld, grant_ref_t ref, unsigned long frame);
    5.30 +    struct domain *rd, struct domain *ld,
    5.31 +    grant_ref_t ref, unsigned long frame);
    5.32  
    5.33 -/* Pre-domain destruction release of granted device mappings of other domains.*/
    5.34 +/* Domain death release of granted device mappings of other domains.*/
    5.35  void
    5.36  gnttab_release_dev_mappings(grant_table_t *gt);
    5.37  
    5.38 +/* Extra GNTST_ values, for internal use only. */
    5.39 +#define GNTST_flush_all        (2)  /* Success, need to flush entire TLB.    */
    5.40 +#define GNTST_flush_one        (1)  /* Success, need to flush a vaddr.       */
    5.41 +
    5.42  #endif /* __XEN_GRANT_H__ */
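
The mapping helpers changed above report, via the internal GNTST_flush_one/GNTST_flush_all success codes that this changeset moves into xen/include/xen/grant_table.h, how much TLB invalidation the caller still owes (see the "IMPORTANT: rc indicates the degree of TLB flush" comment in xen/common/grant_table.c). Below is a minimal, self-contained C sketch of that caller-side decision only; handle_map_result(), flush_tlb_one() and flush_tlb_all() are hypothetical stand-ins for illustration, not the actual code path in gnttab_map_grant_ref().

#include <stdio.h>

/* The two internal success codes defined at the end of the patch above. */
#define GNTST_okay      (0)   /* success, no TLB invalidation needed     */
#define GNTST_flush_one (1)   /* success, need to flush a single vaddr   */
#define GNTST_flush_all (2)   /* success, need to flush the entire TLB   */

/* Hypothetical stand-ins for Xen's real TLB-flush primitives. */
static void flush_tlb_one(unsigned long va) { printf("invlpg %#lx\n", va); }
static void flush_tlb_all(void)             { printf("flush entire TLB\n"); }

/*
 * Sketch of how a caller might act on the return value of
 * update_grant_va_mapping()/update_grant_pte_mapping().  Negative values
 * are the ordinary public GNTST_* errors and would be handled by the
 * caller's normal error path (not shown here).
 */
static void handle_map_result(int rc, unsigned long va)
{
    if ( rc == GNTST_flush_one )
        flush_tlb_one(va);   /* va-addressed update: one stale entry      */
    else if ( rc == GNTST_flush_all )
        flush_tlb_all();     /* pte-addressed update: vaddr unknown       */
    /* GNTST_okay: the old PTE was not present, nothing to invalidate. */
}

int main(void)
{
    handle_map_result(GNTST_flush_one, 0xb8000UL);
    handle_map_result(GNTST_flush_all, 0UL);
    handle_map_result(GNTST_okay, 0UL);
    return 0;
}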