direct-io.hg

changeset 5535:f2545bf8f4c7

bitkeeper revision 1.1713.2.21 (42b8e40fW5sWcGtTukrQr1eZYIVyZw)

adds the necessary put_user/get_user, copy_from_user/copy_to_user, etc. support for VTI

Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
author djm@kirby.fc.hp.com
date Wed Jun 22 04:07:43 2005 +0000 (2005-06-22)
parents 51d718bd11de
children 78b0596ef957
files .rootkeys xen/arch/ia64/patch/linux-2.6.11/uaccess.h xen/arch/ia64/vmmu.c xen/include/asm-ia64/vmx_uaccess.h
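
For orientation, the sketch below shows how hypervisor-side code would typically consume the accessors this changeset introduces. It is illustrative only and not part of the patch; the handler name, the argument struct, and the guest pointer are hypothetical:

    /* Illustrative only: read a small argument block from a VTI domain's
     * virtual address space and write a status value back.  With
     * CONFIG_VTI, every such access goes through the vTLB lookup added
     * in this changeset. */
    struct demo_args {
        unsigned long gva;      /* guest virtual address of a result slot */
        unsigned long nbytes;
    };

    long vmx_demo_op(struct demo_args *uarg)
    {
        struct demo_args args;
        unsigned long status = 0;

        /* copy_from_user() returns the number of bytes NOT copied, so any
         * non-zero result means a vTLB lookup failed along the way. */
        if (copy_from_user(&args, uarg, sizeof(args)))
            return -EFAULT;

        /* put_user()/get_user() return 0 on success and -EFAULT when the
         * target address has no matching vTLB entry. */
        if (put_user(status, (unsigned long *)args.gva))
            return -EFAULT;

        return 0;
    }
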
line diff
     1.1 --- a/.rootkeys	Tue Jun 21 23:32:45 2005 +0000
     1.2 +++ b/.rootkeys	Wed Jun 22 04:07:43 2005 +0000
     1.3 @@ -1388,6 +1388,7 @@ 428b9f387tov0OtOEeF8fVWSR2v5Pg xen/inclu
     1.4  428b9f38is0zTsIm96_BKo4MLw0SzQ xen/include/asm-ia64/vmx_pal_vsa.h
     1.5  428b9f38iDqbugHUheJrcTCD7zlb4g xen/include/asm-ia64/vmx_phy_mode.h
     1.6  428b9f38grd_B0AGB1yp0Gi2befHaQ xen/include/asm-ia64/vmx_platform.h
     1.7 +42b8e0d63B41CDo2Nqmf8Vt0_RercA xen/include/asm-ia64/vmx_uaccess.h
     1.8  428b9f38XgwHchZEpOzRtWfz0agFNQ xen/include/asm-ia64/vmx_vcpu.h
     1.9  428b9f38tDTTJbkoONcAB9ODP8CiVg xen/include/asm-ia64/vmx_vpd.h
    1.10  428b9f38_o0U5uJqmxZf_bqi6_PqVw xen/include/asm-ia64/vtm.h
     2.1 --- a/xen/arch/ia64/patch/linux-2.6.11/uaccess.h	Tue Jun 21 23:32:45 2005 +0000
     2.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/uaccess.h	Wed Jun 22 04:07:43 2005 +0000
     2.3 @@ -1,6 +1,17 @@
     2.4 ---- ../../linux-2.6.11/include/asm-ia64/uaccess.h	2005-06-06 10:36:23.000000000 -0600
     2.5 -+++ include/asm-ia64/uaccess.h	2005-06-10 18:08:06.000000000 -0600
     2.6 -@@ -60,6 +60,11 @@
     2.7 +--- ../../linux-2.6.11/include/asm-ia64/uaccess.h	2005-03-02 00:37:53.000000000 -0700
     2.8 ++++ include/asm-ia64/uaccess.h	2005-06-21 21:53:20.000000000 -0600
     2.9 +@@ -32,6 +32,10 @@
    2.10 +  *	David Mosberger-Tang <davidm@hpl.hp.com>
    2.11 +  */
    2.12 + 
    2.13 ++#ifdef CONFIG_VTI
    2.14 ++#include <asm/vmx_uaccess.h>
    2.15 ++#else // CONFIG_VTI
    2.16 ++
    2.17 + #include <linux/compiler.h>
    2.18 + #include <linux/errno.h>
    2.19 + #include <linux/sched.h>
    2.20 +@@ -60,6 +64,11 @@
    2.21    * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
    2.22    * point inside the virtually mapped linear page table.
    2.23    */
    2.24 @@ -12,7 +23,7 @@
    2.25   #define __access_ok(addr, size, segment)						\
    2.26   ({											\
    2.27   	__chk_user_ptr(addr);								\
    2.28 -@@ -67,6 +72,7 @@
    2.29 +@@ -67,6 +76,7 @@
    2.30   	 && ((segment).seg == KERNEL_DS.seg						\
    2.31   	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
    2.32   })
    2.33 @@ -20,3 +31,11 @@
    2.34   #define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
    2.35   
    2.36   static inline int
    2.37 +@@ -343,6 +353,7 @@
    2.38 + 	__su_ret;						\
    2.39 + })
    2.40 + 
    2.41 ++#endif // CONFIG_VTI
    2.42 + /* Generic code can't deal with the location-relative format that we use for compactness.  */
    2.43 + #define ARCH_HAS_SORT_EXTABLE
    2.44 + #define ARCH_HAS_SEARCH_EXTABLE
     3.1 --- a/xen/arch/ia64/vmmu.c	Tue Jun 21 23:32:45 2005 +0000
     3.2 +++ b/xen/arch/ia64/vmmu.c	Wed Jun 22 04:07:43 2005 +0000
     3.3 @@ -792,3 +792,55 @@ IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT6
     3.4      return IA64_NO_FAULT;
     3.5  }
     3.6  
      3.7 +/*
      3.8 + * [FIXME] Is there an effective way to move this routine into
      3.9 + * vmx_uaccess.h? struct exec_domain would be an incomplete type
     3.10 + * there...
     3.11 + *
     3.12 + * This is the interface to look up the virtual TLB and return the
     3.13 + * corresponding machine address in the 2nd parameter. The 3rd
     3.14 + * parameter returns how many bytes are mapped by the matched vTLB
     3.15 + * entry, so the caller can copy more at once.
     3.16 + *
     3.17 + * If the lookup fails, -EFAULT is returned; otherwise 0. All
     3.18 + * higher-level domain access utilities rely on this routine to
     3.19 + * determine the real machine address.
     3.20 + *
     3.21 + * Yes, put_user and get_user become somewhat slow on top of it.
     3.22 + * However, this step is necessary for any vmx domain virtual
     3.23 + * address, since it lives in a different address space from the
     3.24 + * hypervisor's. Some short-circuit may be added later for special cases.
     3.25 + */
    3.26 +long
    3.27 +__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
    3.28 +{
    3.29 +    unsigned long 	mpfn, gpfn, m, n = *len;
    3.30 +    thash_cb_t		*vtlb;
    3.31 +    unsigned long	end;	/* end of the area mapped by current entry */
    3.32 +    thash_data_t	*entry;
    3.33 +    struct vcpu *v = current;
    3.34 +    ia64_rr	vrr;
    3.35 +
    3.36 +    vtlb = vmx_vcpu_get_vtlb(v); 
    3.37 +    vrr = vmx_vcpu_rr(v, va);
    3.38 +    entry = vtlb_lookup_ex(vtlb, vrr.rid, va, DSIDE_TLB);
    3.39 +    if (entry == NULL)
    3.40 +	return -EFAULT;
    3.41 +
    3.42 +    gpfn =(entry->ppn>>(PAGE_SHIFT-12));
    3.43 +    gpfn =PAGEALIGN(gpfn,(entry->ps-PAGE_SHIFT));
    3.44 +    gpfn = gpfn | POFFSET(va>>PAGE_SHIFT,(entry->ps-PAGE_SHIFT)); 
    3.45 +
    3.46 +    mpfn = __gpfn_to_mfn(v->domain, gpfn);
    3.47 +    m = (mpfn<<PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
    3.48 +    /* machine address may be not continuous */
    3.49 +    end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
    3.50 +    /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
    3.51 +    /* Current entry can't map all requested area */
    3.52 +    if ((m + n) > end)
    3.53 +	n = end - m;
    3.54 +
    3.55 +    *ma = m;
    3.56 +    *len = n;
    3.57 +    return 0;
    3.58 +}
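
Before the new header below, it is worth spelling out the calling convention of __domain_va_to_ma() just added above: *len is an in/out parameter that is clamped to the number of bytes the matched vTLB entry actually covers. A minimal sketch of a single translation, assuming hypothetical guest_va and hv_buf variables:

    /* Illustrative only: translate one guest virtual address and copy as
     * much as the matched mapping covers. */
    unsigned long ma, len = 4096;
    if (__domain_va_to_ma(guest_va, &ma, &len) == 0) {
        /* 'ma' is the machine address; at most 'len' bytes are valid,
         * clamped to the end of the machine page. */
        memcpy(hv_buf, (void *)__va(ma), len);
    } else {
        /* no matching D-side vTLB entry: treat as -EFAULT */
    }
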
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/include/asm-ia64/vmx_uaccess.h	Wed Jun 22 04:07:43 2005 +0000
     4.3 @@ -0,0 +1,156 @@
     4.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     4.5 +/*
     4.6 + * vmx_uaccess.h: Defines vmx specific macros to transfer memory areas
     4.7 + * across the domain/hypervisor boundary.
     4.8 + *
     4.9 + * This program is free software; you can redistribute it and/or modify it
    4.10 + * under the terms and conditions of the GNU General Public License,
    4.11 + * version 2, as published by the Free Software Foundation.
    4.12 + *
    4.13 + * This program is distributed in the hope it will be useful, but WITHOUT
    4.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    4.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    4.16 + * more details.
    4.17 + *
    4.18 + * You should have received a copy of the GNU General Public License along with
    4.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    4.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    4.21 + *
     4.22 + * Note: In a vmx enabled environment the poor man's policy is actually
     4.23 + * useless, since the HV resides in a completely different address space
     4.24 + * from the domain. So the only way to do the access is to search the
     4.25 + * vTLB first and access the identity-mapped address on a hit.
    4.26 + *
    4.27 + * Copyright (c) 2004, Intel Corporation.
    4.28 + * 	Kun Tian (Kevin Tian) (kevin.tian@intel.com)
    4.29 + */
    4.30 +
    4.31 +#ifndef __ASM_IA64_VMX_UACCESS_H__
    4.32 +#define __ASM_IA64_VMX_UACCESS_H__
    4.33 +
    4.34 +#include <xen/compiler.h>
    4.35 +#include <xen/errno.h>
    4.36 +#include <xen/sched.h>
    4.37 +
    4.38 +#include <asm/intrinsics.h>
    4.39 +#include <asm/vmmu.h>
    4.40 +
     4.41 +/* Since the HV never accesses domain space directly, most security checks
     4.42 + * can be dummies for now.
     4.43 + */
    4.44 +asm (".section \"__ex_table\", \"a\"\n\t.previous");
    4.45 +
     4.46 +/* For backward compatibility */
    4.47 +#define __access_ok(addr, size, segment)	1
    4.48 +#define access_ok(addr, size, segment)	__access_ok((addr), (size), (segment))
    4.49 +
    4.50 +/*
    4.51 + * These are the main single-value transfer routines.  They automatically
    4.52 + * use the right size if we just have the right pointer type.
    4.53 + *
    4.54 + * Careful to not
    4.55 + * (a) re-use the arguments for side effects (sizeof/typeof is ok)
    4.56 + * (b) require any knowledge of processes at this stage
    4.57 + */
    4.58 +#define put_user(x, ptr)	__put_user((x), (ptr))
    4.59 +#define get_user(x, ptr)	__get_user((x), (ptr))
    4.60 +
    4.61 +#define __put_user(x, ptr)	__do_put_user((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
    4.62 +#define __get_user(x, ptr)	__do_get_user((x), (ptr), sizeof(*(ptr)))
    4.63 +
     4.64 +/* TODO: add specific unaligned access support later. Assuming accesses
     4.65 + * are aligned at 1, 2, 4, or 8 bytes for now, an operand cannot span
     4.66 + * two vTLB entries.
     4.67 + */
    4.68 +extern long
    4.69 +__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len);
    4.70 +
    4.71 +#define __do_put_user(x, ptr, size)					\
    4.72 +({									\
    4.73 +    __typeof__ (x) __pu_x = (x);					\
    4.74 +    __typeof__ (*(ptr)) __user *__pu_ptr = (ptr);			\
    4.75 +    __typeof__ (size) __pu_size = (size);				\
    4.76 +    unsigned long __pu_ma;						\
    4.77 +    long __pu_err;							\
    4.78 +									\
    4.79 +    __pu_err = __domain_va_to_ma((unsigned long)__pu_ptr,		\
    4.80 +				&__pu_ma, &__pu_size);			\
    4.81 +    __pu_err ? (__pu_err = -EFAULT) :					\
     4.82 +    	(*((__typeof__ (*(ptr)) *)__va(__pu_ma)) = __pu_x);		\
    4.83 +    __pu_err;								\
    4.84 +})
    4.85 +
    4.86 +#define __do_get_user(x, ptr, size)					\
    4.87 +({									\
    4.88 +    __typeof__ (x) __gu_x = (x);					\
    4.89 +    __typeof__ (*(ptr)) __user *__gu_ptr = (ptr);			\
    4.90 +    __typeof__ (size) __gu_size = (size);				\
    4.91 +    unsigned long __gu_ma;						\
    4.92 +    long __gu_err;							\
    4.93 +									\
    4.94 +    __gu_err = __domain_va_to_ma((unsigned long)__gu_ptr,		\
    4.95 +				&__gu_ma, &__gu_size);			\
    4.96 +    __gu_err ? (__gu_err = -EFAULT) :					\
    4.97 +    	(x = *((__typeof__ (*(ptr)) *)__va(__gu_ma)));			\
    4.98 +    __gu_err;								\
    4.99 +})
   4.100 +
   4.101 +/* More complex copy from domain */
   4.102 +#define copy_from_user(to, from, n)	__copy_from_user((to), (from), (n))
   4.103 +#define copy_to_user(to, from, n)	__copy_to_user((to), (from), (n))
    4.104 +#define clear_user(to, n)		__clear_user((to), (n))
   4.105 +
   4.106 +static inline unsigned long
   4.107 +__copy_from_user(void *to, void *from, unsigned long n)
   4.108 +{
   4.109 +    unsigned long ma, i;
   4.110 +
   4.111 +    i = n;
    4.112 +    while (!__domain_va_to_ma((unsigned long)from, &ma, &i)) {
    4.113 +        memcpy(to, (void *)__va(ma), i);
    4.114 +        n -= i;
    4.115 +        if (!n)
    4.116 +            break;
    4.117 +        from += i;
    4.118 +        to += i;
    4.119 +        i = n;
   4.120 +    }
   4.121 +    return n;
   4.122 +}
   4.123 +
   4.124 +static inline unsigned long
   4.125 +__copy_to_user(void *to, void *from, unsigned long n)
   4.126 +{
   4.127 +    unsigned long ma, i;
   4.128 +
   4.129 +    i = n;
    4.130 +    while (!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
    4.131 +        memcpy((void *)__va(ma), from, i);
    4.132 +        n -= i;
    4.133 +        if (!n)
    4.134 +            break;
    4.135 +        from += i;
    4.136 +        to += i;
    4.137 +        i = n;
   4.138 +    }
   4.139 +    return n;
   4.140 +}
   4.141 +
   4.142 +static inline unsigned long
   4.143 +__clear_user(void *to, unsigned long n)
   4.144 +{
   4.145 +    unsigned long ma, i;
   4.146 +
   4.147 +    i = n;
    4.148 +    while (!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
    4.149 +        memset((void *)__va(ma), 0, i);
    4.150 +        n -= i;
    4.151 +        if (!n)
    4.152 +            break;
    4.153 +        to += i;
    4.154 +        i = n;
   4.155 +    }
   4.156 +    return n;
   4.157 +}
   4.158 +
   4.159 +#endif // __ASM_IA64_VMX_UACCESS_H__
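
A closing note on the copy loops above: because a guest-virtual range can map to discontiguous machine pages, __copy_from_user()/__copy_to_user() advance one vTLB-mapped chunk per iteration and return the number of bytes left uncopied. A small sketch of a caller that tolerates a short copy; the helper and buffer names are hypothetical:

    /* Illustrative only: fetch up to 'len' bytes from a guest buffer and
     * accept a short read if a vTLB entry is missing mid-range. */
    static unsigned long
    fetch_guest_buffer(void *hv_buf, void *guest_va, unsigned long len)
    {
        unsigned long left = copy_from_user(hv_buf, guest_va, len);

        /* 'left' bytes at the tail were not copied; the first
         * (len - left) bytes of hv_buf are valid. */
        return len - left;
    }
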