ia64/xen-unstable

changeset 11822:aed7ef54fbfe

[IA64] Remove unused code (vmx_uaccess).

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Wed Oct 18 22:06:49 2006 -0600 (2006-10-18)
parents fffb36174ddb
children 2a9c0f4682ed
files xen/arch/ia64/vmx/vmmu.c xen/include/asm-ia64/vmx_uaccess.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmmu.c	Wed Oct 18 22:06:38 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Wed Oct 18 22:06:49 2006 -0600
     1.3 @@ -735,52 +735,3 @@ IA64FAULT vmx_vcpu_tak(VCPU *vcpu, u64 v
     1.4      }
     1.5      return IA64_NO_FAULT;
     1.6  }
     1.7 -
     1.8 -/*
     1.9 - * [FIXME] Is there an effective way to move this routine
    1.10 - * into vmx_uaccess.h? struct exec_domain is an incomplete
    1.11 - * type there...
    1.12 - *
    1.13 - * This is the interface that looks up the virtual TLB and
    1.14 - * returns the corresponding machine address in the 2nd parameter.
    1.15 - * The 3rd parameter returns how many bytes are mapped by the
    1.16 - * matched vTLB entry, so the caller can copy more at once.
    1.17 - *
    1.18 - * If the lookup fails, -EFAULT is returned; otherwise 0.
    1.19 - * All upper domain access utilities rely on this routine
    1.20 - * to determine the real machine address.
    1.21 - *
    1.22 - * Yes, put_user and get_user seem somewhat slow on top of it,
    1.23 - * but these steps are necessary for any vmx domain virtual
    1.24 - * address, since that is a different address space from the HV's.
    1.25 - * Later some short-circuit may be created for special cases.
    1.26 - */
    1.27 -long
    1.28 -__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
    1.29 -{
    1.30 -    unsigned long  mpfn, gpfn, m, n = *len;
    1.31 -    unsigned long  end;   /* end of the area mapped by current entry */
    1.32 -    thash_data_t   *entry;
    1.33 -    struct vcpu *v = current;
    1.34 -
    1.35 -    entry = vtlb_lookup(v, va, DSIDE_TLB);
    1.36 -    if (entry == NULL)
    1.37 -        return -EFAULT;
    1.38 -
    1.39 -    gpfn =(entry->ppn>>(PAGE_SHIFT-12));
    1.40 -    gpfn =PAGEALIGN(gpfn,(entry->ps-PAGE_SHIFT));
    1.41 -    gpfn = gpfn | POFFSET(va>>PAGE_SHIFT,(entry->ps-PAGE_SHIFT)); 
    1.42 -
    1.43 -    mpfn = gmfn_to_mfn(v->domain, gpfn);
    1.44 -    m = (mpfn<<PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
    1.45 -    /* machine addresses may not be contiguous */
    1.46 -    end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
    1.47 -    /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
    1.48 -    /* Current entry can't map the whole requested area */
    1.49 -    if ((m + n) > end)
    1.50 -        n = end - m;
    1.51 -
    1.52 -    *ma = m;
    1.53 -    *len = n;
    1.54 -    return 0;
    1.55 -}
     2.1 --- a/xen/include/asm-ia64/vmx_uaccess.h	Wed Oct 18 22:06:38 2006 -0600
     2.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.3 @@ -1,156 +0,0 @@
     2.4 -/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     2.5 -/*
     2.6 - * vmx_uaccess.h: Defines vmx specific macros to transfer memory areas
     2.7 - * across the domain/hypervisor boundary.
     2.8 - *
     2.9 - * This program is free software; you can redistribute it and/or modify it
    2.10 - * under the terms and conditions of the GNU General Public License,
    2.11 - * version 2, as published by the Free Software Foundation.
    2.12 - *
    2.13 - * This program is distributed in the hope it will be useful, but WITHOUT
    2.14 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    2.15 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    2.16 - * more details.
    2.17 - *
    2.18 - * You should have received a copy of the GNU General Public License along with
    2.19 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    2.20 - * Place - Suite 330, Boston, MA 02111-1307 USA.
    2.21 - *
    2.22 - * Note:  In a vmx-enabled environment the poor man's policy is actually
    2.23 - * useless, since the HV resides in a completely different address space
    2.24 - * from the domain. So the only way to do the access is to search the
    2.25 - * vTLB first and access the identity-mapped address on a hit.
    2.26 - *
    2.27 - * Copyright (c) 2004, Intel Corporation.
    2.28 - * 	Kun Tian (Kevin Tian) (kevin.tian@intel.com)
    2.29 - */
    2.30 -
    2.31 -#ifndef __ASM_IA64_VMX_UACCESS_H__
    2.32 -#define __ASM_IA64_VMX_UACCESS_H__
    2.33 -
    2.34 -#include <xen/compiler.h>
    2.35 -#include <xen/errno.h>
    2.36 -#include <xen/sched.h>
    2.37 -
    2.38 -#include <asm/intrinsics.h>
    2.39 -#include <asm/vmmu.h>
    2.40 -
    2.41 -/* Since the HV never accesses domain space directly, most security checks
    2.42 - * can be dummies now
    2.43 - */
    2.44 -asm (".section \"__ex_table\", \"a\"\n\t.previous");
    2.45 -
    2.46 -/* For back compatibility */
    2.47 -#define __access_ok(addr, size, segment)	1
    2.48 -#define access_ok(addr, size, segment)	__access_ok((addr), (size), (segment))
    2.49 -
    2.50 -/*
    2.51 - * These are the main single-value transfer routines.  They automatically
    2.52 - * use the right size if we just have the right pointer type.
    2.53 - *
    2.54 - * Careful to not
    2.55 - * (a) re-use the arguments for side effects (sizeof/typeof is ok)
    2.56 - * (b) require any knowledge of processes at this stage
    2.57 - */
    2.58 -#define put_user(x, ptr)	__put_user((x), (ptr))
    2.59 -#define get_user(x, ptr)	__get_user((x), (ptr))
    2.60 -
    2.61 -#define __put_user(x, ptr)	__do_put_user((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
    2.62 -#define __get_user(x, ptr)	__do_get_user((x), (ptr), sizeof(*(ptr)))
    2.63 -
    2.64 -/* TODO: add specific unaligned access support later. Assuming accesses
    2.65 - * aligned at 1, 2, 4 or 8 bytes for now, it's impossible for an operand
    2.66 - * to span two vTLB entries
    2.67 - */
    2.68 -extern long
    2.69 -__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len);
    2.70 -
    2.71 -#define __do_put_user(x, ptr, size)					\
    2.72 -({									\
    2.73 -    __typeof__ (x) __pu_x = (x);					\
    2.74 -    __typeof__ (*(ptr)) __user *__pu_ptr = (ptr);			\
    2.75 -    __typeof__ (size) __pu_size = (size);				\
    2.76 -    unsigned long __pu_ma;						\
    2.77 -    long __pu_err;							\
    2.78 -									\
    2.79 -    __pu_err = __domain_va_to_ma((unsigned long)__pu_ptr,		\
    2.80 -				&__pu_ma, &__pu_size);			\
    2.81 -    __pu_err ? (__pu_err = -EFAULT) :					\
    2.82 -    	(*((__typeof__ (*(ptr)) *)__va(__pu_ma)) = x);			\
    2.83 -    __pu_err;								\
    2.84 -})
    2.85 -
    2.86 -#define __do_get_user(x, ptr, size)					\
    2.87 -({									\
    2.88 -    __typeof__ (x) __gu_x = (x);					\
    2.89 -    __typeof__ (*(ptr)) __user *__gu_ptr = (ptr);			\
    2.90 -    __typeof__ (size) __gu_size = (size);				\
    2.91 -    unsigned long __gu_ma;						\
    2.92 -    long __gu_err;							\
    2.93 -									\
    2.94 -    __gu_err = __domain_va_to_ma((unsigned long)__gu_ptr,		\
    2.95 -				&__gu_ma, &__gu_size);			\
    2.96 -    __gu_err ? (__gu_err = -EFAULT) :					\
    2.97 -    	(x = *((__typeof__ (*(ptr)) *)__va(__gu_ma)));			\
    2.98 -    __gu_err;								\
    2.99 -})
   2.100 -
   2.101 -/* More complex copy from domain */
   2.102 -#define copy_from_user(to, from, n)	__copy_from_user((to), (from), (n))
   2.103 -#define copy_to_user(to, from, n)	__copy_to_user((to), (from), (n))
    2.104 -#define clear_user(to, n)		__clear_user((to), (n))
   2.105 -
   2.106 -static inline unsigned long
   2.107 -__copy_from_user(void *to, void *from, unsigned long n)
   2.108 -{
   2.109 -    unsigned long ma, i;
   2.110 -
   2.111 -    i = n;
   2.112 -    while(!__domain_va_to_ma((unsigned long)from, &ma, &i)) {
   2.113 -	    memcpy(to, (void *)__va(ma), i);
   2.114 -	    n -= i;
   2.115 -        if (!n)
   2.116 -            break;
   2.117 -	    from += i;
   2.118 -	    to += i;
   2.119 -	    i = n;
   2.120 -    }
   2.121 -    return n;
   2.122 -}
   2.123 -
   2.124 -static inline unsigned long
   2.125 -__copy_to_user(void *to, void *from, unsigned long n)
   2.126 -{
   2.127 -    unsigned long ma, i;
   2.128 -
   2.129 -    i = n;
   2.130 -    while(!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
   2.131 -	    memcpy((void *)__va(ma), from, i);
   2.132 -	    n -= i;
   2.133 -        if (!n)
   2.134 -            break;
   2.135 -	    from += i;
   2.136 -	    to += i;
   2.137 -	    i = n;
   2.138 -    }
   2.139 -    return n;
   2.140 -}
   2.141 -
   2.142 -static inline unsigned long
   2.143 -__clear_user(void *to, unsigned long n)
   2.144 -{
   2.145 -    unsigned long ma, i;
   2.146 -
   2.147 -    i = n;
   2.148 -    while(!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
   2.149 -	    memset((void *)__va(ma), 0, i);
   2.150 -	    n -= i;
   2.151 -        if (!n)
   2.152 -            break;
   2.153 -	    to += i;
   2.154 -	    i = n;
   2.155 -    }
   2.156 -    return n;
   2.157 -}
   2.158 -
   2.159 -#endif // __ASM_IA64_VMX_UACCESS_H__
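
For context, the interface removed here followed a simple pattern: translate the start of the guest virtual range through the vTLB, copy as many bytes as the matched mapping covers, then advance and repeat until the request is satisfied. The standalone C sketch below illustrates that chunked translate-and-copy loop only; it is not code from the tree, and the names demo_va_to_ma and demo_copy_from_guest, as well as the 16-byte identity-mapped stub translation, are hypothetical stand-ins for the removed __domain_va_to_ma/vTLB lookup.

/* Standalone sketch of the translate-and-copy pattern used by the removed
 * __copy_from_user(): translate the start of the source range, copy as much
 * as the matched mapping covers, then advance and repeat. All names here
 * (demo_va_to_ma, demo_copy_from_guest) are hypothetical. */
#include <string.h>
#include <stdio.h>

/* Stub translation: pretend each "mapping" covers at most 16 bytes and that
 * the machine address equals the virtual address, so the demo can run.
 * The real routine would return -EFAULT on a vTLB miss. */
static long demo_va_to_ma(unsigned long va, unsigned long *ma, unsigned long *len)
{
    unsigned long chunk = 16 - (va % 16);   /* bytes left in this "mapping" */

    if (*len > chunk)
        *len = chunk;
    *ma = va;                               /* identity mapping for the demo */
    return 0;
}

/* Returns the number of bytes NOT copied, like copy_from_user(). */
static unsigned long demo_copy_from_guest(void *to, const void *from, unsigned long n)
{
    unsigned long ma, i = n;

    while (!demo_va_to_ma((unsigned long)from, &ma, &i)) {
        memcpy(to, (void *)ma, i);          /* copy the chunk this mapping covers */
        n -= i;
        if (!n)
            break;
        from = (const char *)from + i;      /* advance past the copied chunk */
        to = (char *)to + i;
        i = n;                              /* ask for the remainder next time */
    }
    return n;
}

int main(void)
{
    char src[64] = "the quick brown fox jumps over the lazy dog";
    char dst[64] = { 0 };
    unsigned long left = demo_copy_from_guest(dst, src, sizeof(src));

    printf("left=%lu copied=\"%s\"\n", left, dst);
    return 0;
}

In the removed header the same loop appears in __copy_from_user, __copy_to_user and __clear_user; the only difference is that the stub translation above is replaced by the vTLB lookup in __domain_va_to_ma and a __va() conversion of the matched machine address.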