ia64/xen-unstable

annotate xen/common/memory.c @ 10418:ee3d10828937

[XEN] New memory_op XENMEM_exchange. Allows atomic exchange of one memory
reservation for another of the same size, but with different properties.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@dhcp93.uk.xensource.com
date Fri Jun 16 14:43:54 2006 +0100 (2006-06-16)
parents b3d901ba705d
children 716e365377f5
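
For orientation, the sketch below shows roughly how a guest might invoke the
new XENMEM_exchange operation introduced by this changeset. It is an
illustration only, not part of the changeset: the structure and field names
follow the public interface this file relies on (xen/include/public/memory.h
at this revision), while the include path, the HYPERVISOR_memory_op() wrapper
and the helper name exchange_for_dma_pages() are assumed guest-side
conventions.

    /*
     * Hedged sketch: a guest trades nr order-0 pages for nr order-0 pages
     * that must be addressable with 31 bits (e.g. to obtain DMA-capable
     * memory).  At this revision only exchanges within the calling domain
     * are supported, and the total sizes of the two reservations must be
     * equal: (in.nr_extents << in.extent_order) ==
     *        (out.nr_extents << out.extent_order).
     */
    #include <xen/interface/memory.h>   /* assumed guest-side header path */

    static int exchange_for_dma_pages(xen_pfn_t *in_gmfns,
                                      xen_pfn_t *out_mfns,
                                      unsigned int nr)
    {
        struct xen_memory_exchange exch;
        long rc;

        /* Must start at zero; the hypervisor uses it for continuations. */
        exch.nr_exchanged = 0;

        set_xen_guest_handle(exch.in.extent_start, in_gmfns);
        exch.in.nr_extents    = nr;
        exch.in.extent_order  = 0;
        exch.in.address_bits  = 0;
        exch.in.domid         = DOMID_SELF;  /* only self-exchange works */

        set_xen_guest_handle(exch.out.extent_start, out_mfns);
        exch.out.nr_extents   = nr;          /* total sizes must match */
        exch.out.extent_order = 0;
        exch.out.address_bits = 31;          /* request memory below 2GB */
        exch.out.domid        = DOMID_SELF;

        rc = HYPERVISOR_memory_op(XENMEM_exchange, &exch);

        /* For a non-autotranslated guest, out_mfns[] now holds the machine
         * frames of the replacement pages. */
        return (rc == 0 && exch.nr_exchanged == nr) ? 0 : -1;
    }

On success the hypercall returns 0 and nr_exchanged equals in.nr_extents; on
a partial failure nr_exchanged reports how many input extents were exchanged
before the error, as implemented by memory_exchange() in the listing below.
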
rev   line source
kaf24@6486 1 /******************************************************************************
kaf24@6486 2 * memory.c
kaf24@6486 3 *
kaf24@6486 4 * Code to handle memory-related requests.
kaf24@6486 5 *
kaf24@6486 6 * Copyright (c) 2003-2004, B Dragovic
kaf24@6486 7 * Copyright (c) 2003-2005, K A Fraser
kaf24@6486 8 */
kaf24@6486 9
kaf24@6486 10 #include <xen/config.h>
kaf24@6486 11 #include <xen/types.h>
kaf24@6486 12 #include <xen/lib.h>
kaf24@6486 13 #include <xen/mm.h>
kaf24@6486 14 #include <xen/perfc.h>
kaf24@6486 15 #include <xen/sched.h>
kaf24@6486 16 #include <xen/event.h>
kaf24@6486 17 #include <xen/shadow.h>
kaf24@8468 18 #include <xen/iocap.h>
kaf24@9068 19 #include <xen/guest_access.h>
kaf24@6486 20 #include <asm/current.h>
kaf24@6486 21 #include <asm/hardirq.h>
kaf24@6486 22 #include <public/memory.h>
kaf24@6486 23
kaf24@8871 24 /*
kaf24@8871 25 * To allow safe resume of do_memory_op() after preemption, we need to know
kaf24@8871 26 * at what point in the page list to resume. For this purpose I steal the
kaf24@8871 27 * high-order bits of the @cmd parameter, which are otherwise unused and zero.
kaf24@8871 28 */
kaf24@8871 29 #define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */
kaf24@8871 30
kaf24@6486 31 static long
kaf24@6486 32 increase_reservation(
kaf24@6486 33 struct domain *d,
kaf24@10314 34 XEN_GUEST_HANDLE(xen_pfn_t) extent_list,
kaf24@6486 35 unsigned int nr_extents,
kaf24@6486 36 unsigned int extent_order,
kfraser@10418 37 unsigned int memflags,
kaf24@6607 38 int *preempted)
kaf24@6486 39 {
kaf24@8726 40 struct page_info *page;
kaf24@10314 41 unsigned long i;
kaf24@10314 42 xen_pfn_t mfn;
kaf24@6486 43
kaf24@9068 44 if ( !guest_handle_is_null(extent_list) &&
kaf24@9068 45 !guest_handle_okay(extent_list, nr_extents) )
kaf24@6486 46 return 0;
kaf24@6486 47
kaf24@8468 48 if ( (extent_order != 0) &&
kaf24@8468 49 !multipage_allocation_permitted(current->domain) )
kaf24@6486 50 return 0;
kaf24@6486 51
kaf24@6486 52 for ( i = 0; i < nr_extents; i++ )
kaf24@6486 53 {
kaf24@6486 54 if ( hypercall_preempt_check() )
kaf24@6607 55 {
kaf24@6607 56 *preempted = 1;
kaf24@6486 57 return i;
kaf24@6607 58 }
kaf24@6486 59
kaf24@6486 60 if ( unlikely((page = alloc_domheap_pages(
kfraser@10418 61 d, extent_order, memflags)) == NULL) )
kaf24@6486 62 {
kaf24@6752 63 DPRINTK("Could not allocate order=%d extent: "
kfraser@10418 64 "id=%d memflags=%x (%ld of %d)\n",
kfraser@10418 65 extent_order, d->domain_id, memflags, i, nr_extents);
kaf24@6486 66 return i;
kaf24@6486 67 }
kaf24@6486 68
kaf24@6486 69 /* Inform the domain of the new page's machine address. */
kaf24@9068 70 if ( !guest_handle_is_null(extent_list) )
kaf24@8859 71 {
kaf24@8859 72 mfn = page_to_mfn(page);
kaf24@9068 73 if ( unlikely(__copy_to_guest_offset(extent_list, i, &mfn, 1)) )
kaf24@8859 74 return i;
kaf24@8859 75 }
kaf24@6486 76 }
kaf24@6486 77
kaf24@6486 78 return nr_extents;
kaf24@6486 79 }
sos22@8688 80
kaf24@6486 81 static long
kaf24@8673 82 populate_physmap(
kaf24@8673 83 struct domain *d,
kaf24@10314 84 XEN_GUEST_HANDLE(xen_pfn_t) extent_list,
kaf24@9068 85 unsigned int nr_extents,
kaf24@9068 86 unsigned int extent_order,
kfraser@10418 87 unsigned int memflags,
kaf24@9068 88 int *preempted)
kaf24@8673 89 {
kaf24@8726 90 struct page_info *page;
kaf24@10314 91 unsigned long i, j;
kaf24@10314 92 xen_pfn_t gpfn;
kaf24@10314 93 xen_pfn_t mfn;
kaf24@8673 94
kaf24@9068 95 if ( !guest_handle_okay(extent_list, nr_extents) )
kaf24@8673 96 return 0;
kaf24@8673 97
kaf24@8673 98 if ( (extent_order != 0) &&
kaf24@8673 99 !multipage_allocation_permitted(current->domain) )
kaf24@8673 100 return 0;
kaf24@8673 101
kaf24@8673 102 for ( i = 0; i < nr_extents; i++ )
kaf24@8673 103 {
kaf24@8673 104 if ( hypercall_preempt_check() )
kaf24@8673 105 {
kaf24@8673 106 *preempted = 1;
sos22@8688 107 goto out;
kaf24@8673 108 }
kaf24@8673 109
kaf24@9068 110 if ( unlikely(__copy_from_guest_offset(&gpfn, extent_list, i, 1)) )
kaf24@8859 111 goto out;
kaf24@8859 112
kaf24@8673 113 if ( unlikely((page = alloc_domheap_pages(
kfraser@10418 114 d, extent_order, memflags)) == NULL) )
kaf24@8673 115 {
kaf24@8673 116 DPRINTK("Could not allocate order=%d extent: "
kfraser@10418 117 "id=%d memflags=%x (%ld of %d)\n",
kfraser@10418 118 extent_order, d->domain_id, memflags, i, nr_extents);
sos22@8688 119 goto out;
kaf24@8673 120 }
kaf24@8673 121
kaf24@8726 122 mfn = page_to_mfn(page);
kaf24@8673 123
kaf24@8694 124 if ( unlikely(shadow_mode_translate(d)) )
kaf24@8694 125 {
kaf24@8694 126 for ( j = 0; j < (1 << extent_order); j++ )
kaf24@8736 127 guest_physmap_add_page(d, gpfn + j, mfn + j);
sos22@8688 128 }
kaf24@8694 129 else
kaf24@8694 130 {
kaf24@8694 131 for ( j = 0; j < (1 << extent_order); j++ )
kaf24@8736 132 set_gpfn_from_mfn(mfn + j, gpfn + j);
kaf24@8673 133
sos22@8688 134 /* Inform the domain of the new page's machine address. */
kaf24@9068 135 if ( unlikely(__copy_to_guest_offset(extent_list, i, &mfn, 1)) )
sos22@8688 136 goto out;
sos22@8688 137 }
kaf24@8673 138 }
kaf24@8673 139
sos22@8688 140 out:
sos22@8688 141 return i;
kaf24@8673 142 }
cl349@9211 143
cl349@9211 144 int
cl349@9211 145 guest_remove_page(
cl349@9211 146 struct domain *d,
cl349@9211 147 unsigned long gmfn)
cl349@9211 148 {
cl349@9211 149 struct page_info *page;
cl349@9211 150 unsigned long mfn;
cl349@9211 151
cl349@9211 152 mfn = gmfn_to_mfn(d, gmfn);
cl349@9211 153 if ( unlikely(!mfn_valid(mfn)) )
cl349@9211 154 {
cl349@9211 155 DPRINTK("Domain %u page number %lx invalid\n",
cl349@9211 156 d->domain_id, mfn);
cl349@9211 157 return 0;
cl349@9211 158 }
cl349@9211 159
cl349@9211 160 page = mfn_to_page(mfn);
cl349@9211 161 if ( unlikely(!get_page(page, d)) )
cl349@9211 162 {
cl349@9211 163 DPRINTK("Bad page free for domain %u\n", d->domain_id);
cl349@9211 164 return 0;
cl349@9211 165 }
cl349@9211 166
cl349@9211 167 if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
cl349@9211 168 put_page_and_type(page);
cl349@9211 169
cl349@9211 170 if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
cl349@9211 171 put_page(page);
cl349@9211 172
cl349@9211 173 guest_physmap_remove_page(d, gmfn, mfn);
cl349@9211 174
cl349@9211 175 put_page(page);
cl349@9211 176
cl349@9211 177 return 1;
cl349@9211 178 }
cl349@9211 179
kaf24@8673 180 static long
kaf24@6486 181 decrease_reservation(
kaf24@9068 182 struct domain *d,
kaf24@10314 183 XEN_GUEST_HANDLE(xen_pfn_t) extent_list,
kaf24@6486 184 unsigned int nr_extents,
kaf24@6486 185 unsigned int extent_order,
kaf24@6607 186 int *preempted)
kaf24@6486 187 {
kaf24@10314 188 unsigned long i, j;
kaf24@10314 189 xen_pfn_t gmfn;
kaf24@6486 190
kaf24@9068 191 if ( !guest_handle_okay(extent_list, nr_extents) )
kaf24@6486 192 return 0;
kaf24@6486 193
kaf24@6486 194 for ( i = 0; i < nr_extents; i++ )
kaf24@6486 195 {
kaf24@6486 196 if ( hypercall_preempt_check() )
kaf24@6607 197 {
kaf24@6607 198 *preempted = 1;
kaf24@6486 199 return i;
kaf24@6607 200 }
kaf24@6486 201
kaf24@9068 202 if ( unlikely(__copy_from_guest_offset(&gmfn, extent_list, i, 1)) )
kaf24@6486 203 return i;
kaf24@6486 204
kaf24@6486 205 for ( j = 0; j < (1 << extent_order); j++ )
kaf24@6486 206 {
cl349@9211 207 if ( !guest_remove_page(d, gmfn + j) )
kaf24@6486 208 return i;
kaf24@6486 209 }
kaf24@6486 210 }
kaf24@6486 211
kaf24@6486 212 return nr_extents;
kaf24@6486 213 }
kaf24@6486 214
kaf24@8871 215 static long
kaf24@8871 216 translate_gpfn_list(
kaf24@9873 217 XEN_GUEST_HANDLE(xen_translate_gpfn_list_t) uop, unsigned long *progress)
kaf24@8871 218 {
kaf24@8871 219 struct xen_translate_gpfn_list op;
kaf24@10314 220 unsigned long i;
kaf24@10314 221 xen_pfn_t gpfn;
kaf24@10314 222 xen_pfn_t mfn;
kaf24@8871 223 struct domain *d;
kaf24@6486 224
kaf24@9068 225 if ( copy_from_guest(&op, uop, 1) )
kaf24@8871 226 return -EFAULT;
kaf24@8871 227
kaf24@8871 228 /* Is size too large for us to encode a continuation? */
kaf24@8871 229 if ( op.nr_gpfns > (ULONG_MAX >> START_EXTENT_SHIFT) )
kaf24@8871 230 return -EINVAL;
kaf24@8871 231
kaf24@9068 232 if ( !guest_handle_okay(op.gpfn_list, op.nr_gpfns) ||
kaf24@9068 233 !guest_handle_okay(op.mfn_list, op.nr_gpfns) )
kaf24@8871 234 return -EFAULT;
kaf24@8871 235
kaf24@8871 236 if ( op.domid == DOMID_SELF )
kaf24@8871 237 op.domid = current->domain->domain_id;
kaf24@8871 238 else if ( !IS_PRIV(current->domain) )
kaf24@8871 239 return -EPERM;
kaf24@8871 240
kaf24@8871 241 if ( (d = find_domain_by_id(op.domid)) == NULL )
kaf24@8871 242 return -ESRCH;
kaf24@8871 243
kaf24@8871 244 if ( !shadow_mode_translate(d) )
kaf24@8871 245 {
kaf24@8871 246 put_domain(d);
kaf24@8871 247 return -EINVAL;
kaf24@8871 248 }
kaf24@8871 249
kaf24@8871 250 for ( i = *progress; i < op.nr_gpfns; i++ )
kaf24@8871 251 {
kaf24@8871 252 if ( hypercall_preempt_check() )
kaf24@8871 253 {
kaf24@8871 254 put_domain(d);
kaf24@8871 255 *progress = i;
kaf24@8871 256 return -EAGAIN;
kaf24@8871 257 }
kaf24@8871 258
kaf24@9068 259 if ( unlikely(__copy_from_guest_offset(&gpfn, op.gpfn_list, i, 1)) )
kaf24@8871 260 {
kaf24@8871 261 put_domain(d);
kaf24@8871 262 return -EFAULT;
kaf24@8871 263 }
kaf24@8871 264
kaf24@8871 265 mfn = gmfn_to_mfn(d, gpfn);
kaf24@8871 266
kaf24@9068 267 if ( unlikely(__copy_to_guest_offset(op.mfn_list, i, &mfn, 1)) )
kaf24@8871 268 {
kaf24@8871 269 put_domain(d);
kaf24@8871 270 return -EFAULT;
kaf24@8871 271 }
kaf24@8871 272 }
kaf24@8871 273
kaf24@8871 274 put_domain(d);
kaf24@8871 275 return 0;
kaf24@8871 276 }
kaf24@8871 277
kfraser@10418 278 static long
kfraser@10418 279 memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
kfraser@10418 280 {
kfraser@10418 281 struct xen_memory_exchange exch;
kfraser@10418 282 LIST_HEAD(in_chunk_list);
kfraser@10418 283 LIST_HEAD(out_chunk_list);
kfraser@10418 284 unsigned long in_chunk_order, out_chunk_order;
kfraser@10418 285 unsigned long gpfn, gmfn, mfn;
kfraser@10418 286 unsigned long i, j, k;
kfraser@10418 287 unsigned int memflags = 0;
kfraser@10418 288 long rc = 0;
kfraser@10418 289 struct domain *d;
kfraser@10418 290 struct page_info *page;
kfraser@10418 291
kfraser@10418 292 if ( copy_from_guest(&exch, arg, 1) )
kfraser@10418 293 return -EFAULT;
kfraser@10418 294
kfraser@10418 295 /* Various sanity checks. */
kfraser@10418 296 if ( (exch.nr_exchanged > exch.in.nr_extents) ||
kfraser@10418 297 /* Input and output domain identifiers match? */
kfraser@10418 298 (exch.in.domid != exch.out.domid) ||
kfraser@10418 299 /* Sizes of input and output lists do not overflow a long? */
kfraser@10418 300 ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
kfraser@10418 301 ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||
kfraser@10418 302 /* Sizes of input and output lists match? */
kfraser@10418 303 ((exch.in.nr_extents << exch.in.extent_order) !=
kfraser@10418 304 (exch.out.nr_extents << exch.out.extent_order)) )
kfraser@10418 305 {
kfraser@10418 306 rc = -EINVAL;
kfraser@10418 307 goto fail_early;
kfraser@10418 308 }
kfraser@10418 309
kfraser@10418 310 /* Only privileged guests can allocate multi-page contiguous extents. */
kfraser@10418 311 if ( ((exch.in.extent_order != 0) || (exch.out.extent_order != 0)) &&
kfraser@10418 312 !multipage_allocation_permitted(current->domain) )
kfraser@10418 313 {
kfraser@10418 314 rc = -EPERM;
kfraser@10418 315 goto fail_early;
kfraser@10418 316 }
kfraser@10418 317
kfraser@10418 318 if ( (exch.out.address_bits != 0) &&
kfraser@10418 319 (exch.out.address_bits <
kfraser@10418 320 (get_order_from_pages(max_page) + PAGE_SHIFT)) )
kfraser@10418 321 {
kfraser@10418 322 if ( exch.out.address_bits < 31 )
kfraser@10418 323 {
kfraser@10418 324 rc = -ENOMEM;
kfraser@10418 325 goto fail_early;
kfraser@10418 326 }
kfraser@10418 327 memflags = MEMF_dma;
kfraser@10418 328 }
kfraser@10418 329
kfraser@10418 330 guest_handle_add_offset(exch.in.extent_start, exch.nr_exchanged);
kfraser@10418 331 exch.in.nr_extents -= exch.nr_exchanged;
kfraser@10418 332
kfraser@10418 333 if ( exch.in.extent_order <= exch.out.extent_order )
kfraser@10418 334 {
kfraser@10418 335 in_chunk_order = exch.out.extent_order - exch.in.extent_order;
kfraser@10418 336 out_chunk_order = 0;
kfraser@10418 337 guest_handle_add_offset(
kfraser@10418 338 exch.out.extent_start, exch.nr_exchanged >> in_chunk_order);
kfraser@10418 339 exch.out.nr_extents -= exch.nr_exchanged >> in_chunk_order;
kfraser@10418 340 }
kfraser@10418 341 else
kfraser@10418 342 {
kfraser@10418 343 in_chunk_order = 0;
kfraser@10418 344 out_chunk_order = exch.in.extent_order - exch.out.extent_order;
kfraser@10418 345 guest_handle_add_offset(
kfraser@10418 346 exch.out.extent_start, exch.nr_exchanged << out_chunk_order);
kfraser@10418 347 exch.out.nr_extents -= exch.nr_exchanged << out_chunk_order;
kfraser@10418 348 }
kfraser@10418 349
kfraser@10418 350 /*
kfraser@10418 351 * Only support exchange on calling domain right now. Otherwise there are
kfraser@10418 352 * tricky corner cases to consider (e.g., DOMF_dying domain).
kfraser@10418 353 */
kfraser@10418 354 if ( unlikely(exch.in.domid != DOMID_SELF) )
kfraser@10418 355 {
kfraser@10418 356 rc = IS_PRIV(current->domain) ? -EINVAL : -EPERM;
kfraser@10418 357 goto fail_early;
kfraser@10418 358 }
kfraser@10418 359 d = current->domain;
kfraser@10418 360
kfraser@10418 361 for ( i = 0; i < (exch.in.nr_extents >> in_chunk_order); i++ )
kfraser@10418 362 {
kfraser@10418 363 if ( hypercall_preempt_check() )
kfraser@10418 364 {
kfraser@10418 365 exch.nr_exchanged += i << in_chunk_order;
kfraser@10418 366 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 367 return -EFAULT;
kfraser@10418 368 return hypercall_create_continuation(
kfraser@10418 369 __HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg);
kfraser@10418 370 }
kfraser@10418 371
kfraser@10418 372 /* Steal a chunk's worth of input pages from the domain. */
kfraser@10418 373 for ( j = 0; j < (1UL << in_chunk_order); j++ )
kfraser@10418 374 {
kfraser@10418 375 if ( unlikely(__copy_from_guest_offset(
kfraser@10418 376 &gmfn, exch.in.extent_start, (i<<in_chunk_order)+j, 1)) )
kfraser@10418 377 {
kfraser@10418 378 rc = -EFAULT;
kfraser@10418 379 goto fail;
kfraser@10418 380 }
kfraser@10418 381
kfraser@10418 382 for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
kfraser@10418 383 {
kfraser@10418 384 mfn = gmfn_to_mfn(d, gmfn + k);
kfraser@10418 385 if ( unlikely(!mfn_valid(mfn)) )
kfraser@10418 386 {
kfraser@10418 387 rc = -EINVAL;
kfraser@10418 388 goto fail;
kfraser@10418 389 }
kfraser@10418 390
kfraser@10418 391 page = mfn_to_page(mfn);
kfraser@10418 392
kfraser@10418 393 if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
kfraser@10418 394 {
kfraser@10418 395 rc = -EINVAL;
kfraser@10418 396 goto fail;
kfraser@10418 397 }
kfraser@10418 398
kfraser@10418 399 list_add(&page->list, &in_chunk_list);
kfraser@10418 400 }
kfraser@10418 401 }
kfraser@10418 402
kfraser@10418 403 /* Allocate a chunk's worth of anonymous output pages. */
kfraser@10418 404 for ( j = 0; j < (1UL << out_chunk_order); j++ )
kfraser@10418 405 {
kfraser@10418 406 page = alloc_domheap_pages(
kfraser@10418 407 NULL, exch.out.extent_order, memflags);
kfraser@10418 408 if ( unlikely(page == NULL) )
kfraser@10418 409 {
kfraser@10418 410 rc = -ENOMEM;
kfraser@10418 411 goto fail;
kfraser@10418 412 }
kfraser@10418 413
kfraser@10418 414 list_add(&page->list, &out_chunk_list);
kfraser@10418 415 }
kfraser@10418 416
kfraser@10418 417 /*
kfraser@10418 418 * Success! Beyond this point we cannot fail for this chunk.
kfraser@10418 419 */
kfraser@10418 420
kfraser@10418 421 /* Destroy final reference to each input page. */
kfraser@10418 422 while ( !list_empty(&in_chunk_list) )
kfraser@10418 423 {
kfraser@10418 424 page = list_entry(in_chunk_list.next, struct page_info, list);
kfraser@10418 425 list_del(&page->list);
kfraser@10418 426 if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
kfraser@10418 427 BUG();
kfraser@10418 428 mfn = page_to_mfn(page);
kfraser@10418 429 guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn);
kfraser@10418 430 put_page(page);
kfraser@10418 431 }
kfraser@10418 432
kfraser@10418 433 /* Assign each output page to the domain. */
kfraser@10418 434 j = 0;
kfraser@10418 435 while ( !list_empty(&out_chunk_list) )
kfraser@10418 436 {
kfraser@10418 437 page = list_entry(out_chunk_list.next, struct page_info, list);
kfraser@10418 438 list_del(&page->list);
kfraser@10418 439 if ( assign_pages(d, page, exch.out.extent_order,
kfraser@10418 440 MEMF_no_refcount) )
kfraser@10418 441 BUG();
kfraser@10418 442
kfraser@10418 443 /* Note that we ignore errors accessing the output extent list. */
kfraser@10418 444 (void)__copy_from_guest_offset(
kfraser@10418 445 &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);
kfraser@10418 446
kfraser@10418 447 mfn = page_to_mfn(page);
kfraser@10418 448 if ( unlikely(shadow_mode_translate(d)) )
kfraser@10418 449 {
kfraser@10418 450 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
kfraser@10418 451 guest_physmap_add_page(d, gpfn + k, mfn + k);
kfraser@10418 452 }
kfraser@10418 453 else
kfraser@10418 454 {
kfraser@10418 455 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
kfraser@10418 456 set_gpfn_from_mfn(mfn + k, gpfn + k);
kfraser@10418 457 (void)__copy_to_guest_offset(
kfraser@10418 458 exch.out.extent_start, (i<<out_chunk_order)+j, &mfn, 1);
kfraser@10418 459 }
kfraser@10418 460
kfraser@10418 461 j++;
kfraser@10418 462 }
kfraser@10418 463 BUG_ON(j != (1UL << out_chunk_order));
kfraser@10418 464 }
kfraser@10418 465
kfraser@10418 466 exch.nr_exchanged += exch.in.nr_extents;
kfraser@10418 467 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 468 rc = -EFAULT;
kfraser@10418 469 return rc;
kfraser@10418 470
kfraser@10418 471 /*
kfraser@10418 472 * Failed a chunk! Free any partial chunk work. Tell caller how many
kfraser@10418 473 * chunks succeeded.
kfraser@10418 474 */
kfraser@10418 475 fail:
kfraser@10418 476 /* Reassign any input pages we managed to steal. */
kfraser@10418 477 while ( !list_empty(&in_chunk_list) )
kfraser@10418 478 {
kfraser@10418 479 page = list_entry(in_chunk_list.next, struct page_info, list);
kfraser@10418 480 list_del(&page->list);
kfraser@10418 481 if ( assign_pages(d, page, 0, MEMF_no_refcount) )
kfraser@10418 482 BUG();
kfraser@10418 483 }
kfraser@10418 484
kfraser@10418 485 /* Free any output pages we managed to allocate. */
kfraser@10418 486 while ( !list_empty(&out_chunk_list) )
kfraser@10418 487 {
kfraser@10418 488 page = list_entry(out_chunk_list.next, struct page_info, list);
kfraser@10418 489 list_del(&page->list);
kfraser@10418 490 free_domheap_pages(page, exch.out.extent_order);
kfraser@10418 491 }
kfraser@10418 492
kfraser@10418 493 exch.nr_exchanged += i << in_chunk_order;
kfraser@10418 494
kfraser@10418 495 fail_early:
kfraser@10418 496 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 497 rc = -EFAULT;
kfraser@10418 498 return rc;
kfraser@10418 499 }
kfraser@10418 500
kaf24@9873 501 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE(void) arg)
kaf24@6486 502 {
kaf24@6486 503 struct domain *d;
kfraser@10418 504 int rc, op, preempted = 0;
kfraser@10418 505 unsigned int memflags = 0;
kaf24@8871 506 unsigned long start_extent, progress;
kaf24@6486 507 struct xen_memory_reservation reservation;
kaf24@7959 508 domid_t domid;
kaf24@6486 509
kaf24@6486 510 op = cmd & ((1 << START_EXTENT_SHIFT) - 1);
kaf24@6486 511
kaf24@6486 512 switch ( op )
kaf24@6486 513 {
kaf24@6486 514 case XENMEM_increase_reservation:
kaf24@6486 515 case XENMEM_decrease_reservation:
kaf24@8673 516 case XENMEM_populate_physmap:
kfraser@10418 517 start_extent = cmd >> START_EXTENT_SHIFT;
kfraser@10418 518
kaf24@9068 519 if ( copy_from_guest(&reservation, arg, 1) )
kfraser@10418 520 return start_extent;
kaf24@6486 521
kaf24@8871 522 /* Is size too large for us to encode a continuation? */
kaf24@8871 523 if ( reservation.nr_extents > (ULONG_MAX >> START_EXTENT_SHIFT) )
kfraser@10418 524 return start_extent;
kaf24@8871 525
kaf24@6486 526 if ( unlikely(start_extent > reservation.nr_extents) )
kfraser@10418 527 return start_extent;
kaf24@9068 528
kaf24@9068 529 if ( !guest_handle_is_null(reservation.extent_start) )
kaf24@9068 530 guest_handle_add_offset(reservation.extent_start, start_extent);
kaf24@6486 531 reservation.nr_extents -= start_extent;
kaf24@6486 532
kaf24@6701 533 if ( (reservation.address_bits != 0) &&
kaf24@6702 534 (reservation.address_bits <
kaf24@6702 535 (get_order_from_pages(max_page) + PAGE_SHIFT)) )
kaf24@6486 536 {
kaf24@6486 537 if ( reservation.address_bits < 31 )
kfraser@10418 538 return start_extent;
kfraser@10418 539 memflags = MEMF_dma;
kaf24@6486 540 }
kaf24@6486 541
kaf24@6486 542 if ( likely(reservation.domid == DOMID_SELF) )
kaf24@6486 543 d = current->domain;
kfraser@10418 544 else if ( !IS_PRIV(current->domain) ||
kfraser@10418 545 ((d = find_domain_by_id(reservation.domid)) == NULL) )
kfraser@10418 546 return start_extent;
kaf24@6486 547
kaf24@8673 548 switch ( op )
kaf24@8673 549 {
kaf24@8673 550 case XENMEM_increase_reservation:
kaf24@8673 551 rc = increase_reservation(
kaf24@8673 552 d,
kaf24@8673 553 reservation.extent_start,
kaf24@8673 554 reservation.nr_extents,
kaf24@8673 555 reservation.extent_order,
kfraser@10418 556 memflags,
kaf24@8673 557 &preempted);
kaf24@8673 558 break;
kaf24@8673 559 case XENMEM_decrease_reservation:
kaf24@8673 560 rc = decrease_reservation(
kaf24@8673 561 d,
kaf24@8673 562 reservation.extent_start,
kaf24@8673 563 reservation.nr_extents,
kaf24@8673 564 reservation.extent_order,
kaf24@8673 565 &preempted);
kaf24@8673 566 break;
kaf24@8673 567 case XENMEM_populate_physmap:
kaf24@8673 568 default:
kaf24@8673 569 rc = populate_physmap(
kaf24@8673 570 d,
kaf24@8673 571 reservation.extent_start,
kaf24@8673 572 reservation.nr_extents,
kaf24@8673 573 reservation.extent_order,
kfraser@10418 574 memflags,
kaf24@8673 575 &preempted);
kaf24@8673 576 break;
kaf24@8673 577 }
kaf24@6486 578
kaf24@6486 579 if ( unlikely(reservation.domid != DOMID_SELF) )
kaf24@6486 580 put_domain(d);
kaf24@6486 581
kaf24@6486 582 rc += start_extent;
kaf24@6486 583
kaf24@6607 584 if ( preempted )
kaf24@9068 585 return hypercall_create_continuation(
kaf24@9068 586 __HYPERVISOR_memory_op, "lh",
kaf24@9068 587 op | (rc << START_EXTENT_SHIFT), arg);
kaf24@6607 588
kaf24@6486 589 break;
kaf24@6486 590
kfraser@10418 591 case XENMEM_exchange:
kfraser@10418 592 rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
kfraser@10418 593 break;
kfraser@10418 594
kaf24@6486 595 case XENMEM_maximum_ram_page:
kaf24@7959 596 rc = max_page;
kaf24@7959 597 break;
kaf24@7959 598
kaf24@7959 599 case XENMEM_current_reservation:
kaf24@7959 600 case XENMEM_maximum_reservation:
kaf24@9068 601 if ( copy_from_guest(&domid, arg, 1) )
kaf24@6486 602 return -EFAULT;
kaf24@7959 603
kaf24@9068 604 if ( likely(domid == DOMID_SELF) )
kaf24@7959 605 d = current->domain;
kaf24@7959 606 else if ( !IS_PRIV(current->domain) )
kaf24@7959 607 return -EPERM;
kaf24@7959 608 else if ( (d = find_domain_by_id(domid)) == NULL )
kaf24@7959 609 return -ESRCH;
kaf24@7959 610
kaf24@7959 611 rc = (op == XENMEM_current_reservation) ? d->tot_pages : d->max_pages;
kaf24@7959 612
kaf24@7959 613 if ( unlikely(domid != DOMID_SELF) )
kaf24@7959 614 put_domain(d);
kaf24@7959 615
kaf24@6486 616 break;
kaf24@6486 617
kaf24@8871 618 case XENMEM_translate_gpfn_list:
kaf24@8871 619 progress = cmd >> START_EXTENT_SHIFT;
kaf24@9068 620 rc = translate_gpfn_list(
kaf24@9068 621 guest_handle_cast(arg, xen_translate_gpfn_list_t),
kaf24@9068 622 &progress);
kaf24@8871 623 if ( rc == -EAGAIN )
kaf24@9068 624 return hypercall_create_continuation(
kaf24@9068 625 __HYPERVISOR_memory_op, "lh",
kaf24@9068 626 op | (progress << START_EXTENT_SHIFT), arg);
kaf24@8871 627 break;
kaf24@8871 628
kaf24@6486 629 default:
kaf24@8059 630 rc = arch_memory_op(op, arg);
kaf24@6486 631 break;
kaf24@6486 632 }
kaf24@6486 633
kaf24@6486 634 return rc;
kaf24@6486 635 }
kaf24@6486 636
kaf24@6486 637 /*
kaf24@6486 638 * Local variables:
kaf24@6486 639 * mode: C
kaf24@6486 640 * c-set-style: "BSD"
kaf24@6486 641 * c-basic-offset: 4
kaf24@6486 642 * tab-width: 4
kaf24@6486 643 * indent-tabs-mode: nil
kaf24@6486 644 * End:
kaf24@6486 645 */