ia64/xen-unstable

changeset 10452:2ac74e1df3d7

[LINUX] Use new XENMEM_exchange hypercall (where possible)
to provide watertight implementations that should never crash
in ENOMEM situations.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@dhcp93.uk.xensource.com
date Fri Jun 16 14:45:01 2006 +0100 (2006-06-16)
parents ee3d10828937
children 9d46e53c75f7
files linux-2.6-xen-sparse/arch/i386/mm/hypervisor.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/mm/hypervisor.c	Fri Jun 16 14:43:54 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/mm/hypervisor.c	Fri Jun 16 14:45:01 2006 +0100
     1.3 @@ -263,6 +263,10 @@ static void contiguous_bitmap_clear(
     1.4  	}
     1.5  }
     1.6  
     1.7 +/* Protected by balloon_lock. */
     1.8 +#define MAX_CONTIG_ORDER 7
     1.9 +static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
    1.10 +
    1.11  /* Ensure multi-page extents are contiguous in machine memory. */
    1.12  int xen_create_contiguous_region(
    1.13  	unsigned long vstart, unsigned int order, unsigned int address_bits)
    1.14 @@ -271,13 +275,23 @@ int xen_create_contiguous_region(
    1.15  	pud_t         *pud; 
    1.16  	pmd_t         *pmd;
    1.17  	pte_t         *pte;
    1.18 +	unsigned long *in_frames = discontig_frames, out_frame;
    1.19  	unsigned long  frame, i, flags;
    1.20 -	struct xen_memory_reservation reservation = {
    1.21 -		.nr_extents   = 1,
    1.22 -		.extent_order = 0,
    1.23 -		.domid        = DOMID_SELF
    1.24 +	long           rc;
    1.25 +	int            success;
    1.26 +	struct xen_memory_exchange exchange = {
    1.27 +		.in = {
    1.28 +			.nr_extents   = 1UL << order,
    1.29 +			.extent_order = 0,
    1.30 +			.domid        = DOMID_SELF
    1.31 +		},
    1.32 +		.out = {
    1.33 +			.nr_extents   = 1,
    1.34 +			.extent_order = order,
    1.35 +			.address_bits = address_bits,
    1.36 +			.domid        = DOMID_SELF
    1.37 +		}
    1.38  	};
    1.39 -	set_xen_guest_handle(reservation.extent_start, &frame);
    1.40  
    1.41  	/*
    1.42  	 * Currently an auto-translated guest will not perform I/O, nor will
    1.43 @@ -287,68 +301,73 @@ int xen_create_contiguous_region(
    1.44  	if (xen_feature(XENFEAT_auto_translated_physmap))
    1.45  		return 0;
    1.46  
    1.47 +	if (order > MAX_CONTIG_ORDER)
    1.48 +		return -ENOMEM;
    1.49 +
    1.50 +	set_xen_guest_handle(exchange.in.extent_start, in_frames);
    1.51 +	set_xen_guest_handle(exchange.out.extent_start, &out_frame);
    1.52 +
    1.53  	scrub_pages(vstart, 1 << order);
    1.54  
    1.55  	balloon_lock(flags);
    1.56  
    1.57 -	/* 1. Zap current PTEs, giving away the underlying pages. */
    1.58 -	for (i = 0; i < (1<<order); i++) {
    1.59 +	/* 1. Zap current PTEs, remembering MFNs. */
    1.60 +	for (i = 0; i < (1UL<<order); i++) {
    1.61  		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
    1.62  		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
    1.63  		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
    1.64  		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
    1.65 -		frame = pte_mfn(*pte);
    1.66 -		BUG_ON(HYPERVISOR_update_va_mapping(
    1.67 -			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
    1.68 +		in_frames[i] = pte_mfn(*pte);
    1.69 +		if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
    1.70 +						 __pte_ma(0), 0))
    1.71 +			BUG();
    1.72  		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
    1.73  			INVALID_P2M_ENTRY);
    1.74 -		BUG_ON(HYPERVISOR_memory_op(
    1.75 -			XENMEM_decrease_reservation, &reservation) != 1);
    1.76  	}
    1.77  
    1.78  	/* 2. Get a new contiguous memory extent. */
    1.79 -	reservation.extent_order = order;
    1.80 -	reservation.address_bits = address_bits;
    1.81 -	frame = __pa(vstart) >> PAGE_SHIFT;
    1.82 -	if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
    1.83 -				 &reservation) != 1)
    1.84 -		goto fail;
    1.85 -
    1.86 -	/* 3. Map the new extent in place of old pages. */
    1.87 -	for (i = 0; i < (1<<order); i++) {
    1.88 -		BUG_ON(HYPERVISOR_update_va_mapping(
    1.89 -			vstart + (i*PAGE_SIZE),
    1.90 -			pfn_pte_ma(frame+i, PAGE_KERNEL), 0));
    1.91 -		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame+i);
    1.92 +	out_frame = __pa(vstart) >> PAGE_SHIFT;
    1.93 +	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
    1.94 +	success = (exchange.nr_exchanged == (1UL << order));
    1.95 +	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
    1.96 +	BUG_ON(success && (rc != 0));
    1.97 +	if (unlikely(rc == -ENOSYS)) {
    1.98 +		/* Compatibility when XENMEM_exchange is unsupported. */
    1.99 +		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
   1.100 +					 &exchange.in) != (1UL << order))
   1.101 +			BUG();
   1.102 +		success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
   1.103 +						&exchange.out) == 1);
   1.104 +		if (!success) {
   1.105 +			/* Couldn't get special memory: fall back to normal. */
   1.106 +			for (i = 0; i < (1UL<<order); i++)
   1.107 +				in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
   1.108 +			if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
   1.109 +						 &exchange.in) != (1UL<<order))
   1.110 +				BUG();
   1.111 +		}
   1.112  	}
   1.113  
   1.114 -	flush_tlb_all();
   1.115 -
   1.116 -	contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
   1.117 -
   1.118 -	balloon_unlock(flags);
   1.119 -
   1.120 -	return 0;
   1.121 -
   1.122 - fail:
   1.123 -	reservation.extent_order = 0;
   1.124 -	reservation.address_bits = 0;
   1.125 -
   1.126 -	for (i = 0; i < (1<<order); i++) {
   1.127 -		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
   1.128 -		BUG_ON(HYPERVISOR_memory_op(
   1.129 -			XENMEM_populate_physmap, &reservation) != 1);
   1.130 -		BUG_ON(HYPERVISOR_update_va_mapping(
   1.131 -			vstart + (i*PAGE_SIZE),
   1.132 -			pfn_pte_ma(frame, PAGE_KERNEL), 0));
   1.133 +	/* 3. Map the new extent in place of old pages. */
   1.134 +	for (i = 0; i < (1UL<<order); i++) {
   1.135 +		frame = success ? (out_frame + i) : in_frames[i];
   1.136 +		if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
   1.137 +						 pfn_pte_ma(frame,
   1.138 +							    PAGE_KERNEL),
   1.139 +						 0))
   1.140 +			BUG();
   1.141  		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
   1.142  	}
   1.143  
   1.144  	flush_tlb_all();
   1.145  
   1.146 +	if (success)
   1.147 +		contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
   1.148 +				      1UL << order);
   1.149 +
   1.150  	balloon_unlock(flags);
   1.151  
   1.152 -	return -ENOMEM;
   1.153 +	return success ? 0 : -ENOMEM;
   1.154  }
   1.155  
   1.156  void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
   1.157 @@ -357,47 +376,79 @@ void xen_destroy_contiguous_region(unsig
   1.158  	pud_t         *pud; 
   1.159  	pmd_t         *pmd;
   1.160  	pte_t         *pte;
   1.161 +	unsigned long *out_frames = discontig_frames, in_frame;
   1.162  	unsigned long  frame, i, flags;
   1.163 -	struct xen_memory_reservation reservation = {
   1.164 -		.nr_extents   = 1,
   1.165 -		.extent_order = 0,
   1.166 -		.domid        = DOMID_SELF
   1.167 +	long           rc;
   1.168 +	int            success;
   1.169 +	struct xen_memory_exchange exchange = {
   1.170 +		.in = {
   1.171 +			.nr_extents   = 1,
   1.172 +			.extent_order = order,
   1.173 +			.domid        = DOMID_SELF
   1.174 +		},
   1.175 +		.out = {
   1.176 +			.nr_extents   = 1UL << order,
   1.177 +			.extent_order = 0,
   1.178 +			.domid        = DOMID_SELF
   1.179 +		}
   1.180  	};
   1.181 -	set_xen_guest_handle(reservation.extent_start, &frame);
   1.182  
   1.183  	if (xen_feature(XENFEAT_auto_translated_physmap) ||
   1.184  	    !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
   1.185  		return;
   1.186  
   1.187 +	if (order > MAX_CONTIG_ORDER)
   1.188 +		return;
   1.189 +
   1.190 +	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
   1.191 +	set_xen_guest_handle(exchange.out.extent_start, out_frames);
   1.192 +
   1.193  	scrub_pages(vstart, 1 << order);
   1.194  
   1.195  	balloon_lock(flags);
   1.196  
   1.197  	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
   1.198  
   1.199 -	/* 1. Zap current PTEs, giving away the underlying pages. */
   1.200 -	for (i = 0; i < (1<<order); i++) {
   1.201 -		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
   1.202 -		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
   1.203 -		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
   1.204 -		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
   1.205 -		frame = pte_mfn(*pte);
   1.206 -		BUG_ON(HYPERVISOR_update_va_mapping(
   1.207 -			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
   1.208 +	/* 1. Find start MFN of contiguous extent. */
   1.209 +	pgd = pgd_offset_k(vstart);
   1.210 +	pud = pud_offset(pgd, vstart);
   1.211 +	pmd = pmd_offset(pud, vstart);
   1.212 +	pte = pte_offset_kernel(pmd, vstart);
   1.213 +	in_frame = pte_mfn(*pte);
   1.214 +
   1.215 +	/* 2. Zap current PTEs. */
   1.216 +	for (i = 0; i < (1UL<<order); i++) {
    1.217 +		if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
    1.218 +						 __pte_ma(0), 0)) BUG();
   1.219  		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
   1.220  			INVALID_P2M_ENTRY);
   1.221 -		BUG_ON(HYPERVISOR_memory_op(
   1.222 -			XENMEM_decrease_reservation, &reservation) != 1);
   1.223 +		out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
   1.224  	}
   1.225  
   1.226 -	/* 2. Map new pages in place of old pages. */
   1.227 -	for (i = 0; i < (1<<order); i++) {
   1.228 -		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
   1.229 -		BUG_ON(HYPERVISOR_memory_op(
   1.230 -			XENMEM_populate_physmap, &reservation) != 1);
   1.231 -		BUG_ON(HYPERVISOR_update_va_mapping(
   1.232 -			vstart + (i*PAGE_SIZE),
   1.233 -			pfn_pte_ma(frame, PAGE_KERNEL), 0));
   1.234 +	/* 3. Do the exchange for non-contiguous MFNs. */
   1.235 +	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
   1.236 +	success = (exchange.nr_exchanged == 1);
   1.237 +	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
   1.238 +	BUG_ON(success && (rc != 0));
   1.239 +	if (rc == -ENOSYS) {
   1.240 +		/* Compatibility when XENMEM_exchange is unsupported. */
   1.241 +		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
   1.242 +					 &exchange.in) != 1)
   1.243 +			BUG();
   1.244 +		if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
   1.245 +					 &exchange.out) != (1UL << order))
   1.246 +			BUG();
   1.247 +		success = 1;
   1.248 +	}
   1.249 +
   1.250 +	/* 4. Map new pages in place of old pages. */
   1.251 +	for (i = 0; i < (1UL<<order); i++) {
   1.252 +		frame = success ? out_frames[i] : (in_frame + i);
   1.253 +		if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
   1.254 +						 pfn_pte_ma(frame,
   1.255 +							    PAGE_KERNEL),
   1.256 +						 0))
   1.257 +			BUG();
   1.258  		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
   1.259  	}
   1.260