ia64/xen-unstable

annotate xen/common/memory.c @ 19769:2d68d518038b

x86: Allow guests to allocate up to 2MB (superpage) memory extents.

Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
author      Keir Fraser <keir.fraser@citrix.com>
date        Tue Jun 16 14:04:15 2009 +0100
parents     f210a633571c
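
To show what this changeset permits, a guest can now request a 2MB
(order-9) extent in a single hypercall. A minimal guest-side sketch,
assuming the guest environment supplies the HYPERVISOR_memory_op() wrapper
and the set_xen_guest_handle() macro from the public headers (this helper
is illustrative, not part of memory.c):

    static int populate_2mb_extent(xen_pfn_t base_gpfn)
    {
        xen_pfn_t gpfn = base_gpfn;   /* one-entry extent list */
        struct xen_memory_reservation res = {
            .nr_extents   = 1,
            .extent_order = 9,        /* 2^9 x 4kB pages = 2MB */
            .domid        = DOMID_SELF,
        };

        set_xen_guest_handle(res.extent_start, &gpfn);
        /* do_memory_op() returns the number of extents populated. */
        return HYPERVISOR_memory_op(XENMEM_populate_physmap, &res) == 1
               ? 0 : -1;
    }
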
/******************************************************************************
 * memory.c
 *
 * Code to handle memory-related requests.
 *
 * Copyright (c) 2003-2004, B Dragovic
 * Copyright (c) 2003-2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/paging.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/errno.h>
#include <asm/current.h>
#include <asm/hardirq.h>
#include <xen/numa.h>
#include <public/memory.h>
#include <xsm/xsm.h>

struct memop_args {
    /* INPUT */
    struct domain *domain;     /* Domain to be affected. */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_list; /* List of extent base addrs. */
    unsigned int nr_extents;   /* Number of extents to allocate or free. */
    unsigned int extent_order; /* Size of each extent. */
    unsigned int memflags;     /* Allocation flags. */

    /* INPUT/OUTPUT */
    unsigned int nr_done;      /* Number of extents processed so far. */
    int preempted;             /* Was the hypercall preempted? */
};
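
/*
 * All three reservation workers below share this contract: do_memory_op()
 * fills in a memop_args, the worker resumes at nr_done, and if
 * hypercall_preempt_check() fires the worker sets ->preempted and returns
 * with nr_done recording the restart point for the continuation.
 */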

static void increase_reservation(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i;
    xen_pfn_t mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_is_null(a->extent_list) &&
         !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( !multipage_allocation_permitted(current->domain, a->extent_order) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        page = alloc_domheap_pages(d, a->extent_order, a->memflags);
        if ( unlikely(page == NULL) )
        {
            gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
                     a->extent_order, d->domain_id, a->memflags,
                     i, a->nr_extents);
            goto out;
        }

        /* Inform the domain of the new page's machine address. */
        if ( !guest_handle_is_null(a->extent_list) )
        {
            mfn = page_to_mfn(page);
            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    a->nr_done = i;
}
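
/*
 * Guest-side counterpart of increase_reservation() (a sketch, not part of
 * this file): ask Xen for nr_pages fresh 4kB pages. The new machine frame
 * numbers come back in frame_list; as the code above shows, the physmap is
 * not touched, so a PV guest must wire the MFNs into its own p2m afterwards.
 */
static long balloon_up(xen_pfn_t *frame_list, unsigned long nr_pages)
{
    struct xen_memory_reservation res = {
        .nr_extents   = nr_pages,
        .extent_order = 0,
        .domid        = DOMID_SELF,
    };

    set_xen_guest_handle(res.extent_start, frame_list);
    /* Returns how many extents were allocated; may be fewer than asked. */
    return HYPERVISOR_memory_op(XENMEM_increase_reservation, &res);
}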

static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( !multipage_allocation_permitted(current->domain, a->extent_order) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        if ( a->memflags & MEMF_populate_on_demand )
        {
            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
                                                       a->extent_order) < 0 )
                goto out;
        }
        else
        {
            page = alloc_domheap_pages(d, a->extent_order, a->memflags);
            if ( unlikely(page == NULL) )
            {
                gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                         "id=%d memflags=%x (%ld of %d)\n",
                         a->extent_order, d->domain_id, a->memflags,
                         i, a->nr_extents);
                goto out;
            }

            mfn = page_to_mfn(page);
            guest_physmap_add_page(d, gpfn, mfn, a->extent_order);

            if ( !paging_mode_translate(d) )
            {
                for ( j = 0; j < (1 << a->extent_order); j++ )
                    set_gpfn_from_mfn(mfn + j, gpfn + j);

                /* Inform the domain of the new page's machine address. */
                if ( unlikely(__copy_to_guest_offset(a->extent_list, i,
                                                     &mfn, 1)) )
                    goto out;
            }
        }
    }

 out:
    a->nr_done = i;
}
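
/*
 * Sketch of the populate-on-demand path above, as a domain builder might
 * drive it (hypothetical helper, not part of this file): no memory is
 * allocated now; the GFN range is merely marked so the p2m is filled on
 * first access.
 */
static int mark_pod_extent(domid_t domid, xen_pfn_t gpfn, unsigned int order)
{
    struct xen_memory_reservation res = {
        .nr_extents   = 1,
        .extent_order = order,
        .mem_flags    = XENMEMF_populate_on_demand,
        .domid        = domid,   /* caller must be privileged over domid */
    };

    set_xen_guest_handle(res.extent_start, &gpfn);
    return HYPERVISOR_memory_op(XENMEM_populate_physmap, &res) == 1 ? 0 : -1;
}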

int guest_remove_page(struct domain *d, unsigned long gmfn)
{
    struct page_info *page;
    unsigned long mfn;

    mfn = gmfn_to_mfn(d, gmfn);
    if ( unlikely(!mfn_valid(mfn)) )
    {
        gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
                 d->domain_id, gmfn);
        return 0;
    }

    page = mfn_to_page(mfn);
    if ( unlikely(!get_page(page, d)) )
    {
        gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
        return 0;
    }

    if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
        put_page_and_type(page);

    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
        put_page(page);

    guest_physmap_remove_page(d, gmfn, mfn, 0);

    put_page(page);

    return 1;
}

static void decrease_reservation(struct memop_args *a)
{
    unsigned long i, j;
    xen_pfn_t gmfn;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
            goto out;

        /* See if populate-on-demand wants to handle this */
        if ( is_hvm_domain(a->domain)
             && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
            continue;

        for ( j = 0; j < (1 << a->extent_order); j++ )
            if ( !guest_remove_page(a->domain, gmfn + j) )
                goto out;
    }

 out:
    a->nr_done = i;
}
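
/*
 * Guest-side counterpart of decrease_reservation() (a sketch, not part of
 * this file): the classic balloon-down step, returning nr_pages GMFNs to
 * Xen after the guest has stopped using them.
 */
static long balloon_down(xen_pfn_t *gmfn_list, unsigned long nr_pages)
{
    struct xen_memory_reservation res = {
        .nr_extents   = nr_pages,
        .extent_order = 0,
        .domid        = DOMID_SELF,
    };

    set_xen_guest_handle(res.extent_start, gmfn_list);
    /* Returns the number of extents actually released. */
    return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &res);
}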

static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
{
    struct xen_memory_exchange exch;
    PAGE_LIST_HEAD(in_chunk_list);
    PAGE_LIST_HEAD(out_chunk_list);
    unsigned long in_chunk_order, out_chunk_order;
    xen_pfn_t     gpfn, gmfn, mfn;
    unsigned long i, j, k;
    unsigned int  node, memflags = 0;
    long          rc = 0;
    struct domain *d;
    struct page_info *page;

    if ( copy_from_guest(&exch, arg, 1) )
        return -EFAULT;

    /* Various sanity checks. */
    if ( (exch.nr_exchanged > exch.in.nr_extents) ||
         /* Input and output domain identifiers match? */
         (exch.in.domid != exch.out.domid) ||
         /* Sizes of input and output lists do not overflow a long? */
         ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
         ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||
         /* Sizes of input and output lists match? */
         ((exch.in.nr_extents << exch.in.extent_order) !=
          (exch.out.nr_extents << exch.out.extent_order)) )
    {
        rc = -EINVAL;
        goto fail_early;
    }

    /*
     * Only privileged guests can allocate multi-page contiguous extents
     * larger than a 2MB superpage.
     */
    if ( !multipage_allocation_permitted(current->domain,
                                         exch.in.extent_order) ||
         !multipage_allocation_permitted(current->domain,
                                         exch.out.extent_order) )
    {
        rc = -EPERM;
        goto fail_early;
    }

    if ( exch.in.extent_order <= exch.out.extent_order )
    {
        in_chunk_order  = exch.out.extent_order - exch.in.extent_order;
        out_chunk_order = 0;
    }
    else
    {
        in_chunk_order  = 0;
        out_chunk_order = exch.in.extent_order - exch.out.extent_order;
    }
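    /*
     * Example: exchanging 4kB pages for 2MB superpages gives
     * in.extent_order = 0 and out.extent_order = 9, hence in_chunk_order = 9
     * and out_chunk_order = 0: each chunk below steals 512 input pages and
     * allocates one order-9 output extent in their place.
     */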

    /*
     * Only support exchange on calling domain right now. Otherwise there are
     * tricky corner cases to consider (e.g., dying domain).
     */
    if ( unlikely(exch.in.domid != DOMID_SELF) )
    {
        rc = IS_PRIV(current->domain) ? -EINVAL : -EPERM;
        goto fail_early;
    }
    d = current->domain;

    memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
        d,
        XENMEMF_get_address_bits(exch.out.mem_flags) ? :
        (BITS_PER_LONG+PAGE_SHIFT)));
    node = XENMEMF_get_node(exch.out.mem_flags);
    if ( node == NUMA_NO_NODE )
        node = domain_to_node(d);
    memflags |= MEMF_node(node);

    for ( i = (exch.nr_exchanged >> in_chunk_order);
          i < (exch.in.nr_extents >> in_chunk_order);
          i++ )
    {
        if ( hypercall_preempt_check() )
        {
            exch.nr_exchanged = i << in_chunk_order;
            if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
                return -EFAULT;
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg);
        }

        /* Steal a chunk's worth of input pages from the domain. */
        for ( j = 0; j < (1UL << in_chunk_order); j++ )
        {
            if ( unlikely(__copy_from_guest_offset(
                &gmfn, exch.in.extent_start, (i<<in_chunk_order)+j, 1)) )
            {
                rc = -EFAULT;
                goto fail;
            }

            for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
            {
                mfn = gmfn_to_mfn(d, gmfn + k);
                if ( unlikely(!mfn_valid(mfn)) )
                {
                    rc = -EINVAL;
                    goto fail;
                }

                page = mfn_to_page(mfn);

                if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
                {
                    rc = -EINVAL;
                    goto fail;
                }

                page_list_add(page, &in_chunk_list);
            }
        }

        /* Allocate a chunk's worth of anonymous output pages. */
        for ( j = 0; j < (1UL << out_chunk_order); j++ )
        {
            page = alloc_domheap_pages(NULL, exch.out.extent_order, memflags);
            if ( unlikely(page == NULL) )
            {
                rc = -ENOMEM;
                goto fail;
            }

            page_list_add(page, &out_chunk_list);
        }

        /*
         * Success! Beyond this point we cannot fail for this chunk.
         */

        /* Destroy final reference to each input page. */
        while ( (page = page_list_remove_head(&in_chunk_list)) )
        {
            if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
                BUG();
            mfn = page_to_mfn(page);
            guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn, 0);
            put_page(page);
        }

        /* Assign each output page to the domain. */
        j = 0;
        while ( (page = page_list_remove_head(&out_chunk_list)) )
        {
            if ( assign_pages(d, page, exch.out.extent_order,
                              MEMF_no_refcount) )
                BUG();

            /* Note that we ignore errors accessing the output extent list. */
            (void)__copy_from_guest_offset(
                &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);

            mfn = page_to_mfn(page);
            guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);

            if ( !paging_mode_translate(d) )
            {
                for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
                    set_gpfn_from_mfn(mfn + k, gpfn + k);
                (void)__copy_to_guest_offset(
                    exch.out.extent_start, (i<<out_chunk_order)+j, &mfn, 1);
            }

            j++;
        }
        BUG_ON(j != (1UL << out_chunk_order));
    }

    exch.nr_exchanged = exch.in.nr_extents;
    if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
        rc = -EFAULT;
    return rc;

    /*
     * Failed a chunk! Free any partial chunk work. Tell caller how many
     * chunks succeeded.
     */
 fail:
    /* Reassign any input pages we managed to steal. */
    while ( (page = page_list_remove_head(&in_chunk_list)) )
        if ( assign_pages(d, page, 0, MEMF_no_refcount) )
            BUG();

    /* Free any output pages we managed to allocate. */
    while ( (page = page_list_remove_head(&out_chunk_list)) )
        free_domheap_pages(page, exch.out.extent_order);

    exch.nr_exchanged = i << in_chunk_order;

 fail_early:
    if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
        rc = -EFAULT;
    return rc;
}
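
/*
 * Guest-side sketch of a typical XENMEM_exchange use (not part of this
 * file): trade 2^order scattered pages for one machine-contiguous,
 * address-limited extent, e.g. for a DMA buffer. XENMEMF_address_bits()
 * is assumed from public/memory.h, matching the XENMEMF_get_address_bits()
 * decoding above.
 */
static long exchange_for_contiguous(xen_pfn_t *in_gmfns, xen_pfn_t *out_mfn,
                                    unsigned int order,
                                    unsigned int address_bits)
{
    struct xen_memory_exchange exch = {
        .in = {
            .nr_extents   = 1UL << order,  /* totals must match ... */
            .extent_order = 0,
            .domid        = DOMID_SELF,
        },
        .out = {
            .nr_extents   = 1,             /* ... 1 << order pages each way */
            .extent_order = order,
            .mem_flags    = XENMEMF_address_bits(address_bits),
            .domid        = DOMID_SELF,    /* in.domid must equal out.domid */
        },
    };

    set_xen_guest_handle(exch.in.extent_start, in_gmfns);
    set_xen_guest_handle(exch.out.extent_start, out_mfn);
    return HYPERVISOR_memory_op(XENMEM_exchange, &exch);
}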

long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d;
    int rc, op;
    unsigned int address_bits;
    unsigned long start_extent;
    struct xen_memory_reservation reservation;
    struct memop_args args;
    domid_t domid;

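    /*
     * The command word multiplexes two fields: the low MEMOP_CMD_MASK bits
     * select the sub-operation, while the bits from MEMOP_EXTENT_SHIFT up
     * carry the extent at which a preempted hypercall should resume (see
     * the hypercall_create_continuation() call below).
     */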
    op = cmd & MEMOP_CMD_MASK;

    switch ( op )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        start_extent = cmd >> MEMOP_EXTENT_SHIFT;

        if ( copy_from_guest(&reservation, arg, 1) )
            return start_extent;

        /* Is size too large for us to encode a continuation? */
        if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
            return start_extent;

        if ( unlikely(start_extent > reservation.nr_extents) )
            return start_extent;

        args.extent_list  = reservation.extent_start;
        args.nr_extents   = reservation.nr_extents;
        args.extent_order = reservation.extent_order;
        args.nr_done      = start_extent;
        args.preempted    = 0;
        args.memflags     = 0;

        address_bits = XENMEMF_get_address_bits(reservation.mem_flags);
        if ( (address_bits != 0) &&
             (address_bits < (get_order_from_pages(max_page) + PAGE_SHIFT)) )
        {
            if ( address_bits <= PAGE_SHIFT )
                return start_extent;
            args.memflags = MEMF_bits(address_bits);
        }

        args.memflags |= MEMF_node(XENMEMF_get_node(reservation.mem_flags));

        if ( op == XENMEM_populate_physmap
             && (reservation.mem_flags & XENMEMF_populate_on_demand) )
            args.memflags |= MEMF_populate_on_demand;

        if ( likely(reservation.domid == DOMID_SELF) )
        {
            d = rcu_lock_current_domain();
        }
        else
        {
            if ( (d = rcu_lock_domain_by_id(reservation.domid)) == NULL )
                return start_extent;
            if ( !IS_PRIV_FOR(current->domain, d) )
            {
                rcu_unlock_domain(d);
                return start_extent;
            }
        }
        args.domain = d;

        rc = xsm_memory_adjust_reservation(current->domain, d);
        if ( rc )
        {
            rcu_unlock_domain(d);
            return rc;
        }

        switch ( op )
        {
        case XENMEM_increase_reservation:
            increase_reservation(&args);
            break;
        case XENMEM_decrease_reservation:
            decrease_reservation(&args);
            break;
        default: /* XENMEM_populate_physmap */
            populate_physmap(&args);
            break;
        }

        rcu_unlock_domain(d);

        rc = args.nr_done;

        if ( args.preempted )
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "lh",
                op | (rc << MEMOP_EXTENT_SHIFT), arg);

        break;

    case XENMEM_exchange:
        rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
        break;

    case XENMEM_maximum_ram_page:
        rc = max_page;
        break;

    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        if ( copy_from_guest(&domid, arg, 1) )
            return -EFAULT;

        rc = rcu_lock_target_domain_by_id(domid, &d);
        if ( rc )
            return rc;

        rc = xsm_memory_stat_reservation(current->domain, d);
        if ( rc )
        {
            rcu_unlock_domain(d);
            return rc;
        }

        switch ( op )
        {
        case XENMEM_current_reservation:
            rc = d->tot_pages;
            break;
        case XENMEM_maximum_reservation:
            rc = d->max_pages;
            break;
        default:
            ASSERT(op == XENMEM_maximum_gpfn);
            rc = domain_get_maximum_gpfn(d);
            break;
        }

        rcu_unlock_domain(d);

        break;

    default:
        rc = arch_memory_op(op, arg);
        break;
    }

    return rc;
}
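
/*
 * Guest-side sketch of the query sub-ops handled above (not part of this
 * file): XENMEM_maximum_ram_page takes no argument; the reservation
 * queries take a domid_t.
 */
static void print_memory_stats(void)
{
    domid_t domid = DOMID_SELF;
    long max_mfn  = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
    long cur      = HYPERVISOR_memory_op(XENMEM_current_reservation, &domid);
    long max      = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);

    printk("max machine pfn %ld, reservation %ld/%ld pages\n",
           max_mfn, cur, max);
}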

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */