ia64/xen-unstable

changeset 4573:dfe18db08708

bitkeeper revision 1.1327 (42650c157OdzpVLoIU2uHsYHltTfYg)

Remove the unused VERIFY_READ and VERIFY_WRITE parameters from the
access_ok and array_access_ok user-space memory check macros.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Apr 19 13:48:05 2005 +0000 (2005-04-19)
parents 147d96171c0e
children 260db0c4de3b 58efb3448933 c69fbe48a357
files xen/arch/x86/mm.c xen/arch/x86/traps.c xen/arch/x86/x86_32/usercopy.c xen/arch/x86/x86_64/usercopy.c xen/common/dom_mem_ops.c xen/common/grant_table.c xen/common/multicall.c xen/common/physdev.c xen/include/asm-x86/x86_32/uaccess.h xen/include/asm-x86/x86_64/uaccess.h
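
For readers unfamiliar with these macros, here is a minimal standalone sketch of the new two-argument calling convention. It uses a hypothetical stand-in for the real range check (the genuine definitions depend on Xen-internal symbols such as HYPERVISOR_VIRT_START and appear in the uaccess.h hunks below); it is illustrative only, not part of the patch.

/*
 * Illustrative sketch only: demonstrates the new access_ok()/
 * array_access_ok() shape with a stand-in range check.
 */
#include <stdio.h>

/* Hypothetical cut-off standing in for Xen's __range_not_ok() logic. */
#define DEMO_VIRT_START 0xF0000000UL
#define __range_not_ok(addr, size) \
    (((unsigned long)(addr) + (size)) > DEMO_VIRT_START)

/* After this changeset the VERIFY_READ/VERIFY_WRITE argument is gone. */
#define access_ok(addr, size)  (__range_not_ok(addr, size) == 0)
#define array_access_ok(addr, count, size) \
    (((count) < (~0UL / (size))) && access_ok(addr, (count) * (size)))

int main(void)
{
    unsigned long uaddr = 0x10000000UL;

    /* Old call site: access_ok(VERIFY_READ, uaddr, 64);
     * the first argument was never used, so callers simply drop it. */
    printf("access_ok:       %d\n", access_ok(uaddr, 64));
    printf("array_access_ok: %d\n", array_access_ok(uaddr, 8, 16));
    return 0;
}

The patch itself is purely mechanical: every caller loses its VERIFY_READ or VERIFY_WRITE argument, and the two #define constants are deleted from both uaccess.h headers.
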
line diff
     1.1 --- a/xen/arch/x86/mm.c	Tue Apr 19 13:41:12 2005 +0000
     1.2 +++ b/xen/arch/x86/mm.c	Tue Apr 19 13:48:05 2005 +0000
     1.3 @@ -1464,7 +1464,7 @@ int do_mmuext_op(
     1.4          goto out;
     1.5      }
     1.6  
     1.7 -    if ( unlikely(!array_access_ok(VERIFY_READ, uops, count, sizeof(op))) )
     1.8 +    if ( unlikely(!array_access_ok(uops, count, sizeof(op))) )
     1.9      {
    1.10          rc = -EFAULT;
    1.11          goto out;
    1.12 @@ -1644,7 +1644,7 @@ int do_mmuext_op(
    1.13              unsigned long ents = op.nr_ents;
    1.14              if ( ((ptr & (PAGE_SIZE-1)) != 0) || 
    1.15                   (ents > 8192) ||
    1.16 -                 !array_access_ok(VERIFY_READ, ptr, ents, LDT_ENTRY_SIZE) )
    1.17 +                 !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
    1.18              {
    1.19                  okay = 0;
    1.20                  MEM_LOG("Bad args to SET_LDT: ptr=%p, ents=%p", ptr, ents);
    1.21 @@ -1819,7 +1819,7 @@ int do_mmu_update(
    1.22      perfc_addc(num_page_updates, count);
    1.23      perfc_incr_histo(bpt_updates, count, PT_UPDATES);
    1.24  
    1.25 -    if ( unlikely(!array_access_ok(VERIFY_READ, ureqs, count, sizeof(req))) )
    1.26 +    if ( unlikely(!array_access_ok(ureqs, count, sizeof(req))) )
    1.27      {
    1.28          rc = -EFAULT;
    1.29          goto out;
    1.30 @@ -2591,7 +2591,7 @@ static int ptwr_emulated_update(
    1.31      struct domain *d = current->domain;
    1.32  
    1.33      /* Aligned access only, thank you. */
    1.34 -    if ( !access_ok(VERIFY_WRITE, addr, bytes) || ((addr & (bytes-1)) != 0) )
    1.35 +    if ( !access_ok(addr, bytes) || ((addr & (bytes-1)) != 0) )
    1.36      {
    1.37          MEM_LOG("ptwr_emulate: Unaligned or bad size ptwr access (%d, %p)\n",
    1.38                  bytes, addr);
     2.1 --- a/xen/arch/x86/traps.c	Tue Apr 19 13:41:12 2005 +0000
     2.2 +++ b/xen/arch/x86/traps.c	Tue Apr 19 13:48:05 2005 +0000
     2.3 @@ -1105,25 +1105,25 @@ long set_debugreg(struct exec_domain *p,
     2.4      switch ( reg )
     2.5      {
     2.6      case 0: 
     2.7 -        if ( !access_ok(VERIFY_READ, value, sizeof(long)) )
     2.8 +        if ( !access_ok(value, sizeof(long)) )
     2.9              return -EPERM;
    2.10          if ( p == current ) 
    2.11              __asm__ ( "mov %0, %%db0" : : "r" (value) );
    2.12          break;
    2.13      case 1: 
    2.14 -        if ( !access_ok(VERIFY_READ, value, sizeof(long)) )
    2.15 +        if ( !access_ok(value, sizeof(long)) )
    2.16              return -EPERM;
    2.17          if ( p == current ) 
    2.18              __asm__ ( "mov %0, %%db1" : : "r" (value) );
    2.19          break;
    2.20      case 2: 
    2.21 -        if ( !access_ok(VERIFY_READ, value, sizeof(long)) )
    2.22 +        if ( !access_ok(value, sizeof(long)) )
    2.23              return -EPERM;
    2.24          if ( p == current ) 
    2.25              __asm__ ( "mov %0, %%db2" : : "r" (value) );
    2.26          break;
    2.27      case 3:
    2.28 -        if ( !access_ok(VERIFY_READ, value, sizeof(long)) )
    2.29 +        if ( !access_ok(value, sizeof(long)) )
    2.30              return -EPERM;
    2.31          if ( p == current ) 
    2.32              __asm__ ( "mov %0, %%db3" : : "r" (value) );
     3.1 --- a/xen/arch/x86/x86_32/usercopy.c	Tue Apr 19 13:41:12 2005 +0000
     3.2 +++ b/xen/arch/x86/x86_32/usercopy.c	Tue Apr 19 13:48:05 2005 +0000
     3.3 @@ -59,7 +59,7 @@ do {									\
     3.4  unsigned long
     3.5  clear_user(void __user *to, unsigned long n)
     3.6  {
     3.7 -	if (access_ok(VERIFY_WRITE, to, n))
     3.8 +	if (access_ok(to, n))
     3.9  		__do_clear_user(to, n);
    3.10  	return n;
    3.11  }
    3.12 @@ -410,7 +410,7 @@ unsigned long
    3.13  unsigned long
    3.14  copy_to_user(void __user *to, const void *from, unsigned long n)
    3.15  {
    3.16 -	if (access_ok(VERIFY_WRITE, to, n))
    3.17 +	if (access_ok(to, n))
    3.18  		n = __copy_to_user(to, from, n);
    3.19  	return n;
    3.20  }
    3.21 @@ -434,7 +434,7 @@ copy_to_user(void __user *to, const void
    3.22  unsigned long
    3.23  copy_from_user(void *to, const void __user *from, unsigned long n)
    3.24  {
    3.25 -	if (access_ok(VERIFY_READ, from, n))
    3.26 +	if (access_ok(from, n))
    3.27  		n = __copy_from_user(to, from, n);
    3.28  	else
    3.29  		memset(to, 0, n);
     4.1 --- a/xen/arch/x86/x86_64/usercopy.c	Tue Apr 19 13:41:12 2005 +0000
     4.2 +++ b/xen/arch/x86/x86_64/usercopy.c	Tue Apr 19 13:48:05 2005 +0000
     4.3 @@ -127,7 +127,7 @@ unsigned long
     4.4  
     4.5  unsigned long clear_user(void *to, unsigned long n)
     4.6  {
     4.7 -	if (access_ok(VERIFY_WRITE, to, n))
     4.8 +	if (access_ok(to, n))
     4.9  		return __clear_user(to, n);
    4.10  	return n;
    4.11  }
    4.12 @@ -148,7 +148,7 @@ unsigned long clear_user(void *to, unsig
    4.13  unsigned long
    4.14  copy_to_user(void __user *to, const void *from, unsigned n)
    4.15  {
    4.16 -	if (access_ok(VERIFY_WRITE, to, n))
    4.17 +	if (access_ok(to, n))
    4.18  		n = __copy_to_user(to, from, n);
    4.19  	return n;
    4.20  }
    4.21 @@ -172,7 +172,7 @@ copy_to_user(void __user *to, const void
    4.22  unsigned long
    4.23  copy_from_user(void *to, const void __user *from, unsigned n)
    4.24  {
    4.25 -	if (access_ok(VERIFY_READ, from, n))
    4.26 +	if (access_ok(from, n))
    4.27  		n = __copy_from_user(to, from, n);
    4.28  	else
    4.29  		memset(to, 0, n);
     5.1 --- a/xen/common/dom_mem_ops.c	Tue Apr 19 13:41:12 2005 +0000
     5.2 +++ b/xen/common/dom_mem_ops.c	Tue Apr 19 13:48:05 2005 +0000
     5.3 @@ -41,8 +41,8 @@ alloc_dom_mem(struct domain *d,
     5.4      struct pfn_info *page;
     5.5      unsigned long    i;
     5.6  
     5.7 -    if ( unlikely(!array_access_ok(VERIFY_WRITE, extent_list, 
     5.8 -                                   nr_extents, sizeof(*extent_list))) )
     5.9 +    if ( unlikely(!array_access_ok(extent_list, nr_extents,
    5.10 +                                   sizeof(*extent_list))) )
    5.11          return start_extent;
    5.12  
    5.13      if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
    5.14 @@ -79,8 +79,8 @@ free_dom_mem(struct domain *d,
    5.15      struct pfn_info *page;
    5.16      unsigned long    i, j, mpfn;
    5.17  
    5.18 -    if ( unlikely(!array_access_ok(VERIFY_READ, extent_list, 
    5.19 -                                   nr_extents, sizeof(*extent_list))) )
    5.20 +    if ( unlikely(!array_access_ok(extent_list, nr_extents,
    5.21 +                                   sizeof(*extent_list))) )
    5.22          return start_extent;
    5.23  
    5.24      for ( i = start_extent; i < nr_extents; i++ )
     6.1 --- a/xen/common/grant_table.c	Tue Apr 19 13:41:12 2005 +0000
     6.2 +++ b/xen/common/grant_table.c	Tue Apr 19 13:48:05 2005 +0000
     6.3 @@ -812,13 +812,13 @@ do_grant_table_op(
     6.4      {
     6.5      case GNTTABOP_map_grant_ref:
     6.6          if ( unlikely(!array_access_ok(
     6.7 -            VERIFY_WRITE, uop, count, sizeof(gnttab_map_grant_ref_t))) )
     6.8 +            uop, count, sizeof(gnttab_map_grant_ref_t))) )
     6.9              goto out;
    6.10          rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
    6.11          break;
    6.12      case GNTTABOP_unmap_grant_ref:
    6.13          if ( unlikely(!array_access_ok(
    6.14 -            VERIFY_WRITE, uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
    6.15 +            uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
    6.16              goto out;
    6.17          rc = gnttab_unmap_grant_ref((gnttab_unmap_grant_ref_t *)uop, count);
    6.18          break;
     7.1 --- a/xen/common/multicall.c	Tue Apr 19 13:41:12 2005 +0000
     7.2 +++ b/xen/common/multicall.c	Tue Apr 19 13:48:05 2005 +0000
     7.3 @@ -24,8 +24,7 @@ long do_multicall(multicall_entry_t *cal
     7.4          return -EINVAL;
     7.5      }
     7.6  
     7.7 -    if ( unlikely(!array_access_ok(VERIFY_WRITE, call_list, 
     7.8 -                                   nr_calls, sizeof(*call_list))) )
     7.9 +    if ( unlikely(!array_access_ok(call_list, nr_calls, sizeof(*call_list))) )
    7.10      {
    7.11          DPRINTK("Bad memory range %p for %u*%u bytes.\n",
    7.12                  call_list, nr_calls, sizeof(*call_list));
     8.1 --- a/xen/common/physdev.c	Tue Apr 19 13:41:12 2005 +0000
     8.2 +++ b/xen/common/physdev.c	Tue Apr 19 13:48:05 2005 +0000
     8.3 @@ -712,7 +712,7 @@ long do_physdev_op(physdev_op_t *uop)
     8.4  
     8.5      case PHYSDEVOP_SET_IOBITMAP:
     8.6          ret = -EINVAL;
     8.7 -        if ( !access_ok(VERIFY_READ, op.u.set_iobitmap.bitmap, IOBMP_BYTES) ||
     8.8 +        if ( !access_ok(op.u.set_iobitmap.bitmap, IOBMP_BYTES) ||
     8.9               (op.u.set_iobitmap.nr_ports > 65536) )
    8.10              break;
    8.11          ret = 0;
     9.1 --- a/xen/include/asm-x86/x86_32/uaccess.h	Tue Apr 19 13:41:12 2005 +0000
     9.2 +++ b/xen/include/asm-x86/x86_32/uaccess.h	Tue Apr 19 13:48:05 2005 +0000
     9.3 @@ -11,9 +11,6 @@
     9.4  
     9.5  #define __user
     9.6  
     9.7 -#define VERIFY_READ 0
     9.8 -#define VERIFY_WRITE 1
     9.9 -
    9.10  /*
    9.11   * movsl can be slow when source and dest are not both 8-byte aligned
    9.12   */
    9.13 @@ -39,10 +36,10 @@ extern struct movsl_mask {
    9.14  		:"1" (addr),"g" ((int)(size)),"r" (HYPERVISOR_VIRT_START)); \
    9.15  	flag; })
    9.16  
    9.17 -#define access_ok(type,addr,size) (likely(__range_not_ok(addr,size) == 0))
    9.18 +#define access_ok(addr,size) (likely(__range_not_ok(addr,size) == 0))
    9.19  
    9.20 -#define array_access_ok(type,addr,count,size) \
    9.21 -    (likely(count < (~0UL/size)) && access_ok(type,addr,count*size))
    9.22 +#define array_access_ok(addr,count,size) \
    9.23 +    (likely(count < (~0UL/size)) && access_ok(addr,count*size))
    9.24  
    9.25  extern long __get_user_bad(void);
    9.26  extern void __put_user_bad(void);
    10.1 --- a/xen/include/asm-x86/x86_64/uaccess.h	Tue Apr 19 13:41:12 2005 +0000
    10.2 +++ b/xen/include/asm-x86/x86_64/uaccess.h	Tue Apr 19 13:48:05 2005 +0000
    10.3 @@ -12,9 +12,6 @@
    10.4  
    10.5  #define __user
    10.6  
    10.7 -#define VERIFY_READ 0
    10.8 -#define VERIFY_WRITE 1
    10.9 -
   10.10  /*
   10.11   * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
   10.12   * This is also valid for range checks (addr, addr+size). As long as the
   10.13 @@ -25,9 +22,9 @@
   10.14      (((unsigned long)(addr) < (1UL<<48)) || \
   10.15       ((unsigned long)(addr) >= HYPERVISOR_VIRT_END))
   10.16  
   10.17 -#define access_ok(type, addr, size) (__addr_ok(addr))
   10.18 +#define access_ok(addr, size) (__addr_ok(addr))
   10.19  
   10.20 -#define array_access_ok(type,addr,count,size) (__addr_ok(addr))
   10.21 +#define array_access_ok(addr, count, size) (__addr_ok(addr))
   10.22  
   10.23  extern long __get_user_bad(void);
   10.24  extern void __put_user_bad(void);