ia64/xen-unstable

annotate xen/common/memory.c @ 19134:5848b49b74fc

x86-64: use MFNs for linking together pages on lists

Unless more than 16TB is ever going to be supported in Xen, this allows
reducing the linked-list entries in struct page_info from 16 to 8
bytes.

This doesn't modify struct shadow_page_info yet, so to meet the
constraints of that 'mirror' structure the list entry is artificially
forced to 16 bytes in size. That workaround will be removed in a
subsequent patch.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jan 30 11:03:28 2009 +0000 (2009-01-30)
parents 2737293c761e
children db20b819679c
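
The sketch below is illustrative only: it shows the general idea of linking
pages by 32-bit machine frame numbers (MFNs) instead of 64-bit pointers,
which is what lets the embedded list entry shrink from 16 to 8 bytes while
still covering 2^32 frames of 4KB each (16TB). The frame-table layout, the
names page_list_add_tail/page_list_remove_head and the toy main() are
assumptions made for this example, not the actual Xen definitions; this
file only consumes the real interface via PAGE_LIST_HEAD(), page_list_add()
and page_list_remove_head() further down.

    /*
     * Minimal, self-contained sketch of MFN-linked page lists.  This is
     * NOT the real Xen code: names and layout are assumptions chosen for
     * illustration.  Each list link is a 32-bit MFN rather than a 64-bit
     * pointer, so the embedded entry takes 8 bytes instead of 16.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NR_FRAMES   8u
    #define INVALID_MFN (~0u)

    struct page_info {
        struct { uint32_t next, prev; } list;  /* MFN links, 8 bytes total */
        unsigned long count_info;              /* stand-in for other fields */
    };

    /* Toy frame table: frame_table[mfn] describes machine frame 'mfn'. */
    static struct page_info frame_table[NR_FRAMES];

    static struct page_info *mfn_to_page(uint32_t mfn)
    {
        return &frame_table[mfn];
    }

    static uint32_t page_to_mfn(const struct page_info *pg)
    {
        return (uint32_t)(pg - frame_table);
    }

    struct page_list_head {
        struct page_info *next, *tail;
    };

    /* Append a page to the list, linking it to the old tail by MFN. */
    static void page_list_add_tail(struct page_info *pg,
                                   struct page_list_head *h)
    {
        pg->list.next = INVALID_MFN;
        if ( h->tail == NULL )
        {
            pg->list.prev = INVALID_MFN;
            h->next = pg;
        }
        else
        {
            pg->list.prev = page_to_mfn(h->tail);
            h->tail->list.next = page_to_mfn(pg);
        }
        h->tail = pg;
    }

    /* Detach and return the first page, converting its MFN link back. */
    static struct page_info *page_list_remove_head(struct page_list_head *h)
    {
        struct page_info *pg = h->next;

        if ( pg == NULL )
            return NULL;
        if ( pg->list.next == INVALID_MFN )
            h->next = h->tail = NULL;
        else
        {
            h->next = mfn_to_page(pg->list.next);
            h->next->list.prev = INVALID_MFN;
        }
        return pg;
    }

    int main(void)
    {
        struct page_list_head list = { NULL, NULL };
        struct page_info *pg;

        page_list_add_tail(mfn_to_page(3), &list);
        page_list_add_tail(mfn_to_page(5), &list);

        while ( (pg = page_list_remove_head(&list)) != NULL )
            printf("removed mfn %u\n", (unsigned)page_to_mfn(pg));

        return 0;
    }

The real implementation embeds the 8-byte entry in struct page_info and, as
the message above notes, temporarily pads it back to 16 bytes so that
struct shadow_page_info keeps mirroring its layout.
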
rev   line source
kaf24@6486 1 /******************************************************************************
kaf24@6486 2 * memory.c
kaf24@6486 3 *
kaf24@6486 4 * Code to handle memory-related requests.
kaf24@6486 5 *
kaf24@6486 6 * Copyright (c) 2003-2004, B Dragovic
kaf24@6486 7 * Copyright (c) 2003-2005, K A Fraser
kaf24@6486 8 */
kaf24@6486 9
kaf24@6486 10 #include <xen/config.h>
kaf24@6486 11 #include <xen/types.h>
kaf24@6486 12 #include <xen/lib.h>
kaf24@6486 13 #include <xen/mm.h>
kaf24@6486 14 #include <xen/perfc.h>
kaf24@6486 15 #include <xen/sched.h>
kaf24@6486 16 #include <xen/event.h>
Tim@15635 17 #include <xen/paging.h>
kaf24@8468 18 #include <xen/iocap.h>
kaf24@9068 19 #include <xen/guest_access.h>
ack@13295 20 #include <xen/hypercall.h>
kaf24@11219 21 #include <xen/errno.h>
kaf24@6486 22 #include <asm/current.h>
kaf24@6486 23 #include <asm/hardirq.h>
keir@17385 24 #include <xen/numa.h>
kaf24@6486 25 #include <public/memory.h>
kfraser@15815 26 #include <xsm/xsm.h>
kaf24@6486 27
kfraser@12374 28 struct memop_args {
kfraser@12374 29 /* INPUT */
kfraser@12374 30 struct domain *domain; /* Domain to be affected. */
kfraser@12374 31 XEN_GUEST_HANDLE(xen_pfn_t) extent_list; /* List of extent base addrs. */
kfraser@12374 32 unsigned int nr_extents; /* Number of extents to allocate or free. */
kfraser@12374 33 unsigned int extent_order; /* Size of each extent. */
kfraser@12374 34 unsigned int memflags; /* Allocation flags. */
kfraser@12374 35
kfraser@12374 36 /* INPUT/OUTPUT */
kfraser@12374 37 unsigned int nr_done; /* Number of extents processed so far. */
kfraser@12374 38 int preempted; /* Was the hypercall preempted? */
kfraser@12374 39 };
kfraser@12374 40
kfraser@12374 41 static void increase_reservation(struct memop_args *a)
kaf24@6486 42 {
kaf24@8726 43 struct page_info *page;
kaf24@10314 44 unsigned long i;
kaf24@10314 45 xen_pfn_t mfn;
kfraser@12374 46 struct domain *d = a->domain;
kaf24@6486 47
kfraser@12374 48 if ( !guest_handle_is_null(a->extent_list) &&
keir@17860 49 !guest_handle_subrange_okay(a->extent_list, a->nr_done,
keir@17860 50 a->nr_extents-1) )
kfraser@12374 51 return;
kaf24@6486 52
kfraser@12374 53 if ( (a->extent_order != 0) &&
kaf24@8468 54 !multipage_allocation_permitted(current->domain) )
kfraser@12374 55 return;
kaf24@6486 56
kfraser@12374 57 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@6486 58 {
kaf24@6486 59 if ( hypercall_preempt_check() )
kaf24@6607 60 {
kfraser@12374 61 a->preempted = 1;
kfraser@12374 62 goto out;
kaf24@6607 63 }
kaf24@6486 64
keir@17986 65 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
kfraser@12374 66 if ( unlikely(page == NULL) )
kaf24@6486 67 {
kaf24@12038 68 gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
kfraser@10418 69 "id=%d memflags=%x (%ld of %d)\n",
kfraser@12374 70 a->extent_order, d->domain_id, a->memflags,
kfraser@12374 71 i, a->nr_extents);
kfraser@12374 72 goto out;
kaf24@6486 73 }
kaf24@6486 74
kaf24@6486 75 /* Inform the domain of the new page's machine address. */
kfraser@12374 76 if ( !guest_handle_is_null(a->extent_list) )
kaf24@8859 77 {
kaf24@8859 78 mfn = page_to_mfn(page);
kfraser@12374 79 if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
kfraser@12374 80 goto out;
kaf24@8859 81 }
kaf24@6486 82 }
kaf24@6486 83
kfraser@12374 84 out:
kfraser@12374 85 a->nr_done = i;
kaf24@6486 86 }
sos22@8688 87
kfraser@12374 88 static void populate_physmap(struct memop_args *a)
kaf24@8673 89 {
kaf24@8726 90 struct page_info *page;
kaf24@10314 91 unsigned long i, j;
kfraser@12374 92 xen_pfn_t gpfn, mfn;
kfraser@12374 93 struct domain *d = a->domain;
kaf24@8673 94
keir@17860 95 if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
keir@17860 96 a->nr_extents-1) )
kfraser@12374 97 return;
kaf24@8673 98
kfraser@12374 99 if ( (a->extent_order != 0) &&
kaf24@8673 100 !multipage_allocation_permitted(current->domain) )
kfraser@12374 101 return;
kaf24@8673 102
kfraser@12374 103 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@8673 104 {
kaf24@8673 105 if ( hypercall_preempt_check() )
kaf24@8673 106 {
kfraser@12374 107 a->preempted = 1;
sos22@8688 108 goto out;
kaf24@8673 109 }
kaf24@8673 110
kfraser@12374 111 if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
kaf24@8859 112 goto out;
kaf24@8859 113
keir@18975 114 if ( a->memflags & MEMF_populate_on_demand )
kaf24@8673 115 {
keir@18975 116 if ( guest_physmap_mark_populate_on_demand(d, gpfn,
keir@18975 117 a->extent_order) < 0 )
keir@18975 118 goto out;
kaf24@8673 119 }
keir@18975 120 else
keir@18975 121 {
keir@18975 122 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
keir@18975 123 if ( unlikely(page == NULL) )
keir@18975 124 {
keir@18975 125 gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
keir@18975 126 "id=%d memflags=%x (%ld of %d)\n",
keir@18975 127 a->extent_order, d->domain_id, a->memflags,
keir@18975 128 i, a->nr_extents);
keir@18975 129 goto out;
keir@18975 130 }
kaf24@8673 131
keir@18975 132 mfn = page_to_mfn(page);
keir@18975 133 guest_physmap_add_page(d, gpfn, mfn, a->extent_order);
kaf24@8673 134
keir@18975 135 if ( !paging_mode_translate(d) )
keir@18975 136 {
keir@18975 137 for ( j = 0; j < (1 << a->extent_order); j++ )
keir@18975 138 set_gpfn_from_mfn(mfn + j, gpfn + j);
kaf24@8673 139
keir@18975 140 /* Inform the domain of the new page's machine address. */
keir@18975 141 if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
keir@18975 142 goto out;
keir@18975 143 }
sos22@8688 144 }
kaf24@8673 145 }
kaf24@8673 146
keir@18975 147 out:
kfraser@12374 148 a->nr_done = i;
kaf24@8673 149 }
cl349@9211 150
kfraser@12374 151 int guest_remove_page(struct domain *d, unsigned long gmfn)
cl349@9211 152 {
cl349@9211 153 struct page_info *page;
cl349@9211 154 unsigned long mfn;
cl349@9211 155
cl349@9211 156 mfn = gmfn_to_mfn(d, gmfn);
cl349@9211 157 if ( unlikely(!mfn_valid(mfn)) )
cl349@9211 158 {
kaf24@12038 159 gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
tdeegan@11172 160 d->domain_id, gmfn);
cl349@9211 161 return 0;
cl349@9211 162 }
cl349@9211 163
cl349@9211 164 page = mfn_to_page(mfn);
cl349@9211 165 if ( unlikely(!get_page(page, d)) )
cl349@9211 166 {
kaf24@12038 167 gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
cl349@9211 168 return 0;
cl349@9211 169 }
cl349@9211 170
cl349@9211 171 if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
cl349@9211 172 put_page_and_type(page);
cl349@9211 173
cl349@9211 174 if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
cl349@9211 175 put_page(page);
cl349@9211 176
keir@17664 177 guest_physmap_remove_page(d, gmfn, mfn, 0);
cl349@9211 178
cl349@9211 179 put_page(page);
cl349@9211 180
cl349@9211 181 return 1;
cl349@9211 182 }
cl349@9211 183
kfraser@12374 184 static void decrease_reservation(struct memop_args *a)
kaf24@6486 185 {
kaf24@10314 186 unsigned long i, j;
kaf24@10314 187 xen_pfn_t gmfn;
kaf24@6486 188
keir@17860 189 if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
keir@17860 190 a->nr_extents-1) )
kfraser@12374 191 return;
kaf24@6486 192
kfraser@12374 193 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@6486 194 {
kaf24@6486 195 if ( hypercall_preempt_check() )
kaf24@6607 196 {
kfraser@12374 197 a->preempted = 1;
kfraser@12374 198 goto out;
kaf24@6607 199 }
kaf24@6486 200
kfraser@12374 201 if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
kfraser@12374 202 goto out;
kaf24@6486 203
keir@18972 204 /* See if populate-on-demand wants to handle this */
keir@18972 205 if ( is_hvm_domain(a->domain)
keir@18972 206 && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
keir@18972 207 continue;
keir@18972 208
kfraser@12374 209 for ( j = 0; j < (1 << a->extent_order); j++ )
kfraser@12374 210 if ( !guest_remove_page(a->domain, gmfn + j) )
kfraser@12374 211 goto out;
kaf24@6486 212 }
kaf24@6486 213
kfraser@12374 214 out:
kfraser@12374 215 a->nr_done = i;
kaf24@6486 216 }
kaf24@6486 217
kfraser@12374 218 static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
kfraser@10418 219 {
kfraser@10418 220 struct xen_memory_exchange exch;
keir@19134 221 PAGE_LIST_HEAD(in_chunk_list);
keir@19134 222 PAGE_LIST_HEAD(out_chunk_list);
kfraser@10418 223 unsigned long in_chunk_order, out_chunk_order;
kaf24@10459 224 xen_pfn_t gpfn, gmfn, mfn;
kfraser@10418 225 unsigned long i, j, k;
keir@17988 226 unsigned int node, memflags = 0;
kfraser@10418 227 long rc = 0;
kfraser@10418 228 struct domain *d;
kfraser@10418 229 struct page_info *page;
kfraser@10418 230
kfraser@10418 231 if ( copy_from_guest(&exch, arg, 1) )
kfraser@10418 232 return -EFAULT;
kfraser@10418 233
kfraser@10418 234 /* Various sanity checks. */
kfraser@10418 235 if ( (exch.nr_exchanged > exch.in.nr_extents) ||
kfraser@10418 236 /* Input and output domain identifiers match? */
kfraser@10418 237 (exch.in.domid != exch.out.domid) ||
kfraser@10418 238 /* Sizes of input and output lists do not overflow a long? */
kfraser@10418 239 ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
kfraser@10418 240 ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||
kfraser@10418 241 /* Sizes of input and output lists match? */
kfraser@10418 242 ((exch.in.nr_extents << exch.in.extent_order) !=
kfraser@10418 243 (exch.out.nr_extents << exch.out.extent_order)) )
kfraser@10418 244 {
kfraser@10418 245 rc = -EINVAL;
kfraser@10418 246 goto fail_early;
kfraser@10418 247 }
kfraser@10418 248
kfraser@10418 249 /* Only privileged guests can allocate multi-page contiguous extents. */
kfraser@10418 250 if ( ((exch.in.extent_order != 0) || (exch.out.extent_order != 0)) &&
kfraser@10418 251 !multipage_allocation_permitted(current->domain) )
kfraser@10418 252 {
kfraser@10418 253 rc = -EPERM;
kfraser@10418 254 goto fail_early;
kfraser@10418 255 }
kfraser@10418 256
kfraser@10418 257 if ( exch.in.extent_order <= exch.out.extent_order )
kfraser@10418 258 {
kfraser@10418 259 in_chunk_order = exch.out.extent_order - exch.in.extent_order;
kfraser@10418 260 out_chunk_order = 0;
kfraser@10418 261 }
kfraser@10418 262 else
kfraser@10418 263 {
kfraser@10418 264 in_chunk_order = 0;
kfraser@10418 265 out_chunk_order = exch.in.extent_order - exch.out.extent_order;
kfraser@10418 266 }
kfraser@10418 267
kfraser@10418 268 /*
kfraser@10418 269 * Only support exchange on calling domain right now. Otherwise there are
kfraser@14642 270 * tricky corner cases to consider (e.g., dying domain).
kfraser@10418 271 */
kfraser@10418 272 if ( unlikely(exch.in.domid != DOMID_SELF) )
kfraser@10418 273 {
kfraser@10418 274 rc = IS_PRIV(current->domain) ? -EINVAL : -EPERM;
kfraser@10418 275 goto fail_early;
kfraser@10418 276 }
kfraser@10418 277 d = current->domain;
kfraser@10418 278
keir@16548 279 memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
keir@17986 280 d,
keir@17986 281 XENMEMF_get_address_bits(exch.out.mem_flags) ? :
keir@17986 282 (BITS_PER_LONG+PAGE_SHIFT)));
keir@17988 283 node = XENMEMF_get_node(exch.out.mem_flags);
keir@17988 284 if ( node == NUMA_NO_NODE )
keir@17988 285 node = domain_to_node(d);
keir@17988 286 memflags |= MEMF_node(node);
kfraser@11973 287
kfraser@12374 288 for ( i = (exch.nr_exchanged >> in_chunk_order);
kfraser@12374 289 i < (exch.in.nr_extents >> in_chunk_order);
kfraser@12374 290 i++ )
kfraser@10418 291 {
kfraser@10418 292 if ( hypercall_preempt_check() )
kfraser@10418 293 {
kfraser@12374 294 exch.nr_exchanged = i << in_chunk_order;
kfraser@10418 295 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 296 return -EFAULT;
kfraser@10418 297 return hypercall_create_continuation(
kfraser@10418 298 __HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg);
kfraser@10418 299 }
kfraser@10418 300
kfraser@10418 301 /* Steal a chunk's worth of input pages from the domain. */
kfraser@10418 302 for ( j = 0; j < (1UL << in_chunk_order); j++ )
kfraser@10418 303 {
kfraser@10418 304 if ( unlikely(__copy_from_guest_offset(
kfraser@10418 305 &gmfn, exch.in.extent_start, (i<<in_chunk_order)+j, 1)) )
kfraser@10418 306 {
kfraser@10418 307 rc = -EFAULT;
kfraser@10418 308 goto fail;
kfraser@10418 309 }
kfraser@10418 310
kfraser@10418 311 for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
kfraser@10418 312 {
kfraser@10418 313 mfn = gmfn_to_mfn(d, gmfn + k);
kfraser@10418 314 if ( unlikely(!mfn_valid(mfn)) )
kfraser@10418 315 {
kfraser@10418 316 rc = -EINVAL;
kfraser@10418 317 goto fail;
kfraser@10418 318 }
kfraser@10418 319
kfraser@10418 320 page = mfn_to_page(mfn);
kfraser@10418 321
kfraser@10418 322 if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
kfraser@10418 323 {
kfraser@10418 324 rc = -EINVAL;
kfraser@10418 325 goto fail;
kfraser@10418 326 }
kfraser@10418 327
keir@19134 328 page_list_add(page, &in_chunk_list);
kfraser@10418 329 }
kfraser@10418 330 }
kfraser@10418 331
kfraser@10418 332 /* Allocate a chunk's worth of anonymous output pages. */
kfraser@10418 333 for ( j = 0; j < (1UL << out_chunk_order); j++ )
kfraser@10418 334 {
keir@17385 335 page = alloc_domheap_pages(NULL, exch.out.extent_order, memflags);
kfraser@10418 336 if ( unlikely(page == NULL) )
kfraser@10418 337 {
kfraser@10418 338 rc = -ENOMEM;
kfraser@10418 339 goto fail;
kfraser@10418 340 }
kfraser@10418 341
keir@19134 342 page_list_add(page, &out_chunk_list);
kfraser@10418 343 }
kfraser@10418 344
kfraser@10418 345 /*
kfraser@10418 346 * Success! Beyond this point we cannot fail for this chunk.
kfraser@10418 347 */
kfraser@10418 348
kfraser@10418 349 /* Destroy final reference to each input page. */
keir@19134 350 while ( (page = page_list_remove_head(&in_chunk_list)) )
kfraser@10418 351 {
kfraser@10418 352 if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
kfraser@10418 353 BUG();
kfraser@10418 354 mfn = page_to_mfn(page);
keir@17664 355 guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn, 0);
kfraser@10418 356 put_page(page);
kfraser@10418 357 }
kfraser@10418 358
kfraser@10418 359 /* Assign each output page to the domain. */
kfraser@10418 360 j = 0;
keir@19134 361 while ( (page = page_list_remove_head(&out_chunk_list)) )
kfraser@10418 362 {
kfraser@10418 363 if ( assign_pages(d, page, exch.out.extent_order,
kfraser@10418 364 MEMF_no_refcount) )
kfraser@10418 365 BUG();
kfraser@10418 366
kfraser@10418 367 /* Note that we ignore errors accessing the output extent list. */
kfraser@10418 368 (void)__copy_from_guest_offset(
kfraser@10418 369 &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);
kfraser@10418 370
kfraser@10418 371 mfn = page_to_mfn(page);
keir@17727 372 guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
keir@17727 373
keir@17727 374 if ( !paging_mode_translate(d) )
kfraser@10418 375 {
kfraser@10418 376 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
kfraser@10418 377 set_gpfn_from_mfn(mfn + k, gpfn + k);
kfraser@10418 378 (void)__copy_to_guest_offset(
kfraser@10418 379 exch.out.extent_start, (i<<out_chunk_order)+j, &mfn, 1);
kfraser@10418 380 }
kfraser@10418 381
kfraser@10418 382 j++;
kfraser@10418 383 }
kfraser@10418 384 BUG_ON(j != (1UL << out_chunk_order));
kfraser@10418 385 }
kfraser@10418 386
kfraser@12374 387 exch.nr_exchanged = exch.in.nr_extents;
kfraser@10418 388 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 389 rc = -EFAULT;
kfraser@10418 390 return rc;
kfraser@10418 391
kfraser@10418 392 /*
kfraser@10418 393 * Failed a chunk! Free any partial chunk work. Tell caller how many
kfraser@10418 394 * chunks succeeded.
kfraser@10418 395 */
kfraser@10418 396 fail:
kfraser@10418 397 /* Reassign any input pages we managed to steal. */
keir@19134 398 while ( (page = page_list_remove_head(&in_chunk_list)) )
kfraser@10418 399 if ( assign_pages(d, page, 0, MEMF_no_refcount) )
kfraser@10418 400 BUG();
kfraser@10418 401
kfraser@10418 402 /* Free any output pages we managed to allocate. */
keir@19134 403 while ( (page = page_list_remove_head(&out_chunk_list)) )
kfraser@10418 404 free_domheap_pages(page, exch.out.extent_order);
kfraser@10418 405
kfraser@12374 406 exch.nr_exchanged = i << in_chunk_order;
kfraser@10418 407
kfraser@10418 408 fail_early:
kfraser@10418 409 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 410 rc = -EFAULT;
kfraser@10418 411 return rc;
kfraser@10418 412 }
kfraser@10418 413
kaf24@9873 414 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE(void) arg)
kaf24@6486 415 {
kaf24@6486 416 struct domain *d;
kfraser@12374 417 int rc, op;
keir@17986 418 unsigned int address_bits;
keir@19041 419 unsigned long start_extent;
kaf24@6486 420 struct xen_memory_reservation reservation;
kfraser@12374 421 struct memop_args args;
kaf24@7959 422 domid_t domid;
kaf24@6486 423
ack@13295 424 op = cmd & MEMOP_CMD_MASK;
kaf24@6486 425
kaf24@6486 426 switch ( op )
kaf24@6486 427 {
kaf24@6486 428 case XENMEM_increase_reservation:
kaf24@6486 429 case XENMEM_decrease_reservation:
kaf24@8673 430 case XENMEM_populate_physmap:
ack@13295 431 start_extent = cmd >> MEMOP_EXTENT_SHIFT;
kfraser@10418 432
kaf24@9068 433 if ( copy_from_guest(&reservation, arg, 1) )
kfraser@10418 434 return start_extent;
kaf24@6486 435
kaf24@8871 436 /* Is size too large for us to encode a continuation? */
ack@13295 437 if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
kfraser@10418 438 return start_extent;
kaf24@8871 439
kaf24@6486 440 if ( unlikely(start_extent > reservation.nr_extents) )
kfraser@10418 441 return start_extent;
kaf24@9068 442
kfraser@12374 443 args.extent_list = reservation.extent_start;
kfraser@12374 444 args.nr_extents = reservation.nr_extents;
kfraser@12374 445 args.extent_order = reservation.extent_order;
kfraser@12374 446 args.nr_done = start_extent;
kfraser@12374 447 args.preempted = 0;
kfraser@12374 448 args.memflags = 0;
kaf24@6486 449
keir@17986 450 address_bits = XENMEMF_get_address_bits(reservation.mem_flags);
keir@17986 451 if ( (address_bits != 0) &&
keir@17986 452 (address_bits < (get_order_from_pages(max_page) + PAGE_SHIFT)) )
kaf24@6486 453 {
keir@17986 454 if ( address_bits <= PAGE_SHIFT )
kfraser@10418 455 return start_extent;
keir@17986 456 args.memflags = MEMF_bits(address_bits);
kaf24@6486 457 }
kaf24@6486 458
keir@17986 459 args.memflags |= MEMF_node(XENMEMF_get_node(reservation.mem_flags));
keir@17986 460
keir@18975 461 if ( op == XENMEM_populate_physmap
keir@18975 462 && (reservation.mem_flags & XENMEMF_populate_on_demand) )
keir@18975 463 args.memflags |= MEMF_populate_on_demand;
keir@18975 464
kaf24@6486 465 if ( likely(reservation.domid == DOMID_SELF) )
keir@17349 466 {
keir@17349 467 d = rcu_lock_current_domain();
keir@17349 468 }
keir@17349 469 else
keir@17349 470 {
keir@17349 471 if ( (d = rcu_lock_domain_by_id(reservation.domid)) == NULL )
keir@16856 472 return start_extent;
keir@17349 473 if ( !IS_PRIV_FOR(current->domain, d) )
keir@17349 474 {
keir@16856 475 rcu_unlock_domain(d);
keir@16856 476 return start_extent;
keir@16856 477 }
keir@16856 478 }
kfraser@12374 479 args.domain = d;
kaf24@6486 480
kfraser@15815 481 rc = xsm_memory_adjust_reservation(current->domain, d);
kfraser@15815 482 if ( rc )
kfraser@15815 483 {
keir@17349 484 rcu_unlock_domain(d);
kfraser@15815 485 return rc;
kfraser@15815 486 }
kfraser@15815 487
kaf24@8673 488 switch ( op )
kaf24@8673 489 {
kaf24@8673 490 case XENMEM_increase_reservation:
kfraser@12374 491 increase_reservation(&args);
kaf24@8673 492 break;
kaf24@8673 493 case XENMEM_decrease_reservation:
kfraser@12374 494 decrease_reservation(&args);
kaf24@8673 495 break;
kfraser@12374 496 default: /* XENMEM_populate_physmap */
kfraser@12374 497 populate_physmap(&args);
kaf24@8673 498 break;
kaf24@8673 499 }
kaf24@6486 500
keir@17349 501 rcu_unlock_domain(d);
kaf24@6486 502
kfraser@12374 503 rc = args.nr_done;
kaf24@6486 504
kfraser@12374 505 if ( args.preempted )
kaf24@9068 506 return hypercall_create_continuation(
kaf24@9068 507 __HYPERVISOR_memory_op, "lh",
ack@13295 508 op | (rc << MEMOP_EXTENT_SHIFT), arg);
kaf24@6607 509
kaf24@6486 510 break;
kaf24@6486 511
kfraser@10418 512 case XENMEM_exchange:
kfraser@10418 513 rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
kfraser@10418 514 break;
kfraser@10418 515
kaf24@6486 516 case XENMEM_maximum_ram_page:
kaf24@7959 517 rc = max_page;
kaf24@7959 518 break;
kaf24@7959 519
kaf24@7959 520 case XENMEM_current_reservation:
kaf24@7959 521 case XENMEM_maximum_reservation:
kfraser@14471 522 case XENMEM_maximum_gpfn:
kaf24@9068 523 if ( copy_from_guest(&domid, arg, 1) )
kaf24@6486 524 return -EFAULT;
kaf24@7959 525
keir@18574 526 rc = rcu_lock_target_domain_by_id(domid, &d);
keir@18574 527 if ( rc )
keir@18574 528 return rc;
kaf24@7959 529
kfraser@15815 530 rc = xsm_memory_stat_reservation(current->domain, d);
kfraser@15815 531 if ( rc )
kfraser@15815 532 {
keir@17349 533 rcu_unlock_domain(d);
kfraser@15815 534 return rc;
kfraser@15815 535 }
kfraser@15815 536
kfraser@14471 537 switch ( op )
kfraser@14471 538 {
kfraser@14471 539 case XENMEM_current_reservation:
kfraser@14471 540 rc = d->tot_pages;
kfraser@14471 541 break;
kfraser@14471 542 case XENMEM_maximum_reservation:
kfraser@14471 543 rc = d->max_pages;
kfraser@14471 544 break;
kfraser@14471 545 default:
kfraser@14471 546 ASSERT(op == XENMEM_maximum_gpfn);
kfraser@14471 547 rc = domain_get_maximum_gpfn(d);
kfraser@14471 548 break;
kfraser@14471 549 }
kaf24@7959 550
keir@17349 551 rcu_unlock_domain(d);
kaf24@7959 552
kaf24@6486 553 break;
kaf24@6486 554
kaf24@6486 555 default:
kaf24@8059 556 rc = arch_memory_op(op, arg);
kaf24@6486 557 break;
kaf24@6486 558 }
kaf24@6486 559
kaf24@6486 560 return rc;
kaf24@6486 561 }
kaf24@6486 562
kaf24@6486 563 /*
kaf24@6486 564 * Local variables:
kaf24@6486 565 * mode: C
kaf24@6486 566 * c-set-style: "BSD"
kaf24@6486 567 * c-basic-offset: 4
kaf24@6486 568 * tab-width: 4
kaf24@6486 569 * indent-tabs-mode: nil
kaf24@6486 570 * End:
kaf24@6486 571 */