ia64/xen-unstable

changeset 10455:05ab081f3c67

Merge with xen-ia64-unstable.hg.
author kfraser@dhcp93.uk.xensource.com
date Fri Jun 16 18:08:27 2006 +0100 (2006-06-16)
parents 0d1dab1d9b67 61a81fab2a02
children e1ae7b3cb5b7
files
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/mm/hypervisor.c	Fri Jun 16 10:18:54 2006 -0600
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/mm/hypervisor.c	Fri Jun 16 18:08:27 2006 +0100
     1.3 @@ -263,6 +263,10 @@ static void contiguous_bitmap_clear(
     1.4  	}
     1.5  }
     1.6  
     1.7 +/* Protected by balloon_lock. */
     1.8 +#define MAX_CONTIG_ORDER 7
     1.9 +static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
    1.10 +
    1.11  /* Ensure multi-page extents are contiguous in machine memory. */
    1.12  int xen_create_contiguous_region(
    1.13  	unsigned long vstart, unsigned int order, unsigned int address_bits)
    1.14 @@ -271,13 +275,23 @@ int xen_create_contiguous_region(
    1.15  	pud_t         *pud; 
    1.16  	pmd_t         *pmd;
    1.17  	pte_t         *pte;
    1.18 +	unsigned long *in_frames = discontig_frames, out_frame;
    1.19  	unsigned long  frame, i, flags;
    1.20 -	struct xen_memory_reservation reservation = {
    1.21 -		.nr_extents   = 1,
    1.22 -		.extent_order = 0,
    1.23 -		.domid        = DOMID_SELF
    1.24 +	long           rc;
    1.25 +	int            success;
    1.26 +	struct xen_memory_exchange exchange = {
    1.27 +		.in = {
    1.28 +			.nr_extents   = 1UL << order,
    1.29 +			.extent_order = 0,
    1.30 +			.domid        = DOMID_SELF
    1.31 +		},
    1.32 +		.out = {
    1.33 +			.nr_extents   = 1,
    1.34 +			.extent_order = order,
    1.35 +			.address_bits = address_bits,
    1.36 +			.domid        = DOMID_SELF
    1.37 +		}
    1.38  	};
    1.39 -	set_xen_guest_handle(reservation.extent_start, &frame);
    1.40  
    1.41  	/*
    1.42  	 * Currently an auto-translated guest will not perform I/O, nor will
    1.43 @@ -287,68 +301,73 @@ int xen_create_contiguous_region(
    1.44  	if (xen_feature(XENFEAT_auto_translated_physmap))
    1.45  		return 0;
    1.46  
    1.47 +	if (order > MAX_CONTIG_ORDER)
    1.48 +		return -ENOMEM;
    1.49 +
    1.50 +	set_xen_guest_handle(exchange.in.extent_start, in_frames);
    1.51 +	set_xen_guest_handle(exchange.out.extent_start, &out_frame);
    1.52 +
    1.53  	scrub_pages(vstart, 1 << order);
    1.54  
    1.55  	balloon_lock(flags);
    1.56  
    1.57 -	/* 1. Zap current PTEs, giving away the underlying pages. */
    1.58 -	for (i = 0; i < (1<<order); i++) {
    1.59 +	/* 1. Zap current PTEs, remembering MFNs. */
    1.60 +	for (i = 0; i < (1UL<<order); i++) {
    1.61  		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
    1.62  		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
    1.63  		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
    1.64  		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
    1.65 -		frame = pte_mfn(*pte);
    1.66 -		BUG_ON(HYPERVISOR_update_va_mapping(
    1.67 -			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
    1.68 +		in_frames[i] = pte_mfn(*pte);
    1.69 +		if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
    1.70 +						 __pte_ma(0), 0))
    1.71 +			BUG();
    1.72  		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
    1.73  			INVALID_P2M_ENTRY);
    1.74 -		BUG_ON(HYPERVISOR_memory_op(
    1.75 -			XENMEM_decrease_reservation, &reservation) != 1);
    1.76  	}
    1.77  
    1.78  	/* 2. Get a new contiguous memory extent. */
    1.79 -	reservation.extent_order = order;
    1.80 -	reservation.address_bits = address_bits;
    1.81 -	frame = __pa(vstart) >> PAGE_SHIFT;
    1.82 -	if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
    1.83 -				 &reservation) != 1)
    1.84 -		goto fail;
    1.85 -
    1.86 -	/* 3. Map the new extent in place of old pages. */
    1.87 -	for (i = 0; i < (1<<order); i++) {
    1.88 -		BUG_ON(HYPERVISOR_update_va_mapping(
    1.89 -			vstart + (i*PAGE_SIZE),
    1.90 -			pfn_pte_ma(frame+i, PAGE_KERNEL), 0));
    1.91 -		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame+i);
    1.92 +	out_frame = __pa(vstart) >> PAGE_SHIFT;
    1.93 +	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
    1.94 +	success = (exchange.nr_exchanged == (1UL << order));
    1.95 +	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
    1.96 +	BUG_ON(success && (rc != 0));
    1.97 +	if (unlikely(rc == -ENOSYS)) {
    1.98 +		/* Compatibility when XENMEM_exchange is unsupported. */
    1.99 +		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
   1.100 +					 &exchange.in) != (1UL << order))
   1.101 +			BUG();
   1.102 +		success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
   1.103 +						&exchange.out) == 1);
   1.104 +		if (!success) {
   1.105 +			/* Couldn't get special memory: fall back to normal. */
   1.106 +			for (i = 0; i < (1UL<<order); i++)
   1.107 +				in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
   1.108 +			if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
   1.109 +						 &exchange.in) != (1UL<<order))
   1.110 +				BUG();
   1.111 +		}
   1.112  	}
   1.113  
   1.114 -	flush_tlb_all();
   1.115 -
   1.116 -	contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
   1.117 -
   1.118 -	balloon_unlock(flags);
   1.119 -
   1.120 -	return 0;
   1.121 -
   1.122 - fail:
   1.123 -	reservation.extent_order = 0;
   1.124 -	reservation.address_bits = 0;
   1.125 -
   1.126 -	for (i = 0; i < (1<<order); i++) {
   1.127 -		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
   1.128 -		BUG_ON(HYPERVISOR_memory_op(
   1.129 -			XENMEM_populate_physmap, &reservation) != 1);
   1.130 -		BUG_ON(HYPERVISOR_update_va_mapping(
   1.131 -			vstart + (i*PAGE_SIZE),
   1.132 -			pfn_pte_ma(frame, PAGE_KERNEL), 0));
   1.133 +	/* 3. Map the new extent in place of old pages. */
   1.134 +	for (i = 0; i < (1UL<<order); i++) {
   1.135 +		frame = success ? (out_frame + i) : in_frames[i];
   1.136 +		if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
   1.137 +						 pfn_pte_ma(frame,
   1.138 +							    PAGE_KERNEL),
   1.139 +						 0))
   1.140 +			BUG();
   1.141  		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
   1.142  	}
   1.143  
   1.144  	flush_tlb_all();
   1.145  
   1.146 +	if (success)
   1.147 +		contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
   1.148 +				      1UL << order);
   1.149 +
   1.150  	balloon_unlock(flags);
   1.151  
   1.152 -	return -ENOMEM;
   1.153 +	return success ? 0 : -ENOMEM;
   1.154  }
   1.155  
   1.156  void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
   1.157 @@ -357,47 +376,79 @@ void xen_destroy_contiguous_region(unsig
   1.158  	pud_t         *pud; 
   1.159  	pmd_t         *pmd;
   1.160  	pte_t         *pte;
   1.161 +	unsigned long *out_frames = discontig_frames, in_frame;
   1.162  	unsigned long  frame, i, flags;
   1.163 -	struct xen_memory_reservation reservation = {
   1.164 -		.nr_extents   = 1,
   1.165 -		.extent_order = 0,
   1.166 -		.domid        = DOMID_SELF
   1.167 +	long           rc;
   1.168 +	int            success;
   1.169 +	struct xen_memory_exchange exchange = {
   1.170 +		.in = {
   1.171 +			.nr_extents   = 1,
   1.172 +			.extent_order = order,
   1.173 +			.domid        = DOMID_SELF
   1.174 +		},
   1.175 +		.out = {
   1.176 +			.nr_extents   = 1UL << order,
   1.177 +			.extent_order = 0,
   1.178 +			.domid        = DOMID_SELF
   1.179 +		}
   1.180  	};
   1.181 -	set_xen_guest_handle(reservation.extent_start, &frame);
   1.182  
   1.183  	if (xen_feature(XENFEAT_auto_translated_physmap) ||
   1.184  	    !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
   1.185  		return;
   1.186  
   1.187 +	if (order > MAX_CONTIG_ORDER)
   1.188 +		return;
   1.189 +
   1.190 +	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
   1.191 +	set_xen_guest_handle(exchange.out.extent_start, out_frames);
   1.192 +
   1.193  	scrub_pages(vstart, 1 << order);
   1.194  
   1.195  	balloon_lock(flags);
   1.196  
   1.197  	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
   1.198  
   1.199 -	/* 1. Zap current PTEs, giving away the underlying pages. */
   1.200 -	for (i = 0; i < (1<<order); i++) {
   1.201 -		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
   1.202 -		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
   1.203 -		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
   1.204 -		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
   1.205 -		frame = pte_mfn(*pte);
   1.206 -		BUG_ON(HYPERVISOR_update_va_mapping(
   1.207 -			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
   1.208 +	/* 1. Find start MFN of contiguous extent. */
   1.209 +	pgd = pgd_offset_k(vstart);
   1.210 +	pud = pud_offset(pgd, vstart);
   1.211 +	pmd = pmd_offset(pud, vstart);
   1.212 +	pte = pte_offset_kernel(pmd, vstart);
   1.213 +	in_frame = pte_mfn(*pte);
   1.214 +
   1.215 +	/* 2. Zap current PTEs. */
   1.216 +	for (i = 0; i < (1UL<<order); i++) {
    1.217 +		if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
    1.218 +						 __pte_ma(0), 0))
    1.219 +			BUG();
   1.219  		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
   1.220  			INVALID_P2M_ENTRY);
   1.221 -		BUG_ON(HYPERVISOR_memory_op(
   1.222 -			XENMEM_decrease_reservation, &reservation) != 1);
   1.223 +		out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
   1.224  	}
   1.225  
   1.226 -	/* 2. Map new pages in place of old pages. */
   1.227 -	for (i = 0; i < (1<<order); i++) {
   1.228 -		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
   1.229 -		BUG_ON(HYPERVISOR_memory_op(
   1.230 -			XENMEM_populate_physmap, &reservation) != 1);
   1.231 -		BUG_ON(HYPERVISOR_update_va_mapping(
   1.232 -			vstart + (i*PAGE_SIZE),
   1.233 -			pfn_pte_ma(frame, PAGE_KERNEL), 0));
   1.234 +	/* 3. Do the exchange for non-contiguous MFNs. */
   1.235 +	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
   1.236 +	success = (exchange.nr_exchanged == 1);
   1.237 +	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
   1.238 +	BUG_ON(success && (rc != 0));
   1.239 +	if (rc == -ENOSYS) {
   1.240 +		/* Compatibility when XENMEM_exchange is unsupported. */
   1.241 +		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
   1.242 +					 &exchange.in) != 1)
   1.243 +			BUG();
   1.244 +		if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
   1.245 +					 &exchange.out) != (1UL << order))
   1.246 +			BUG();
   1.247 +		success = 1;
   1.248 +	}
   1.249 +
   1.250 +	/* 4. Map new pages in place of old pages. */
   1.251 +	for (i = 0; i < (1UL<<order); i++) {
   1.252 +		frame = success ? out_frames[i] : (in_frame + i);
   1.253 +		if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
   1.254 +						 pfn_pte_ma(frame,
   1.255 +							    PAGE_KERNEL),
   1.256 +						 0))
   1.257 +			BUG();
   1.258  		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
   1.259  	}
   1.260  
     2.1 --- a/xen/arch/x86/mm.c	Fri Jun 16 10:18:54 2006 -0600
     2.2 +++ b/xen/arch/x86/mm.c	Fri Jun 16 18:08:27 2006 +0100
     2.3 @@ -1167,6 +1167,9 @@ static inline int update_l1e(l1_pgentry_
     2.4                               l1_pgentry_t  ol1e, 
     2.5                               l1_pgentry_t  nl1e)
     2.6  {
     2.7 +#ifndef PTE_UPDATE_WITH_CMPXCHG
     2.8 +    return !__copy_to_user(pl1e, &nl1e, sizeof(nl1e));
     2.9 +#else
    2.10      intpte_t o = l1e_get_intpte(ol1e);
    2.11      intpte_t n = l1e_get_intpte(nl1e);
    2.12  
    2.13 @@ -1181,6 +1184,7 @@ static inline int update_l1e(l1_pgentry_
    2.14          return 0;
    2.15      }
    2.16      return 1;
    2.17 +#endif
    2.18  }
    2.19  
    2.20  
    2.21 @@ -1228,6 +1232,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
    2.22      return 1;
    2.23  }
    2.24  
    2.25 +#ifndef PTE_UPDATE_WITH_CMPXCHG
    2.26 +#define UPDATE_ENTRY(_t,_p,_o,_n) ({ (*(_p) = (_n)); 1; })
    2.27 +#else
    2.28  #define UPDATE_ENTRY(_t,_p,_o,_n) ({                                    \
    2.29      intpte_t __o = cmpxchg((intpte_t *)(_p),                            \
    2.30                             _t ## e_get_intpte(_o),                      \
    2.31 @@ -1239,6 +1246,7 @@ static int mod_l1_entry(l1_pgentry_t *pl
    2.32                  (_t ## e_get_intpte(_n)),                               \
    2.33                  (__o));                                                 \
    2.34      (__o == _t ## e_get_intpte(_o)); })
    2.35 +#endif
    2.36  
    2.37  /* Update the L2 entry at pl2e to new value nl2e. pl2e is within frame pfn. */
    2.38  static int mod_l2_entry(l2_pgentry_t *pl2e, 
    2.39 @@ -2408,8 +2416,8 @@ static int create_grant_pte_mapping(
    2.40          goto failed;
    2.41      }
    2.42  
    2.43 -    if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) ||
    2.44 -         !update_l1e(va, ol1e, _nl1e) )
    2.45 +    ol1e = *(l1_pgentry_t *)va;
    2.46 +    if ( !update_l1e(va, ol1e, _nl1e) )
    2.47      {
    2.48          put_page_type(page);
    2.49          rc = GNTST_general_error;