ia64/xen-unstable

xen/common/memory.c @ 13875:3fbe12560ffe

[XEN] When removing pages, drop shadow refs before complaining about refcount.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>

author   Tim Deegan <Tim.Deegan@xensource.com>
date     Thu Feb 08 10:44:53 2007 +0000 (2007-02-08)
parents  271ffb1c12eb
children ee4850bc895b
/******************************************************************************
 * memory.c
 *
 * Code to handle memory-related requests.
 *
 * Copyright (c) 2003-2004, B Dragovic
 * Copyright (c) 2003-2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/shadow.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/errno.h>
#include <asm/current.h>
#include <asm/hardirq.h>
#include <public/memory.h>

struct memop_args {
    /* INPUT */
    struct domain *domain;     /* Domain to be affected. */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_list; /* List of extent base addrs. */
    unsigned int nr_extents;   /* Number of extents to allocate or free. */
    unsigned int extent_order; /* Size of each extent. */
    unsigned int memflags;     /* Allocation flags. */

    /* INPUT/OUTPUT */
    unsigned int nr_done;      /* Number of extents processed so far. */
    int          preempted;    /* Was the hypercall preempted? */
};

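/*
 * Pick an allocation CPU for domain d: its VCPU0's current processor if
 * it has one, else CPU0.  The heap allocator takes this as a placement
 * hint, so a domain's memory can favour nodes local to that CPU.
 */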
static unsigned int select_local_cpu(struct domain *d)
{
    struct vcpu *v = d->vcpu[0];
    return (v ? v->processor : 0);
}

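/*
 * XENMEM_increase_reservation: allocate a->nr_extents extents of order
 * a->extent_order for a->domain, resuming from a->nr_done.  The MFN of
 * each new extent is copied back to a->extent_list unless that handle is
 * null.  Progress is recorded in a->nr_done, and a->preempted is set if
 * the loop is interrupted, so do_memory_op() can create a continuation.
 */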
static void increase_reservation(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i;
    xen_pfn_t mfn;
    struct domain *d = a->domain;
    unsigned int cpu = select_local_cpu(d);

    if ( !guest_handle_is_null(a->extent_list) &&
         !guest_handle_okay(a->extent_list, a->nr_extents) )
        return;

    if ( (a->extent_order != 0) &&
         !multipage_allocation_permitted(current->domain) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
        if ( unlikely(page == NULL) )
        {
            gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
                     a->extent_order, d->domain_id, a->memflags,
                     i, a->nr_extents);
            goto out;
        }

        /* Inform the domain of the new page's machine address. */
        if ( !guest_handle_is_null(a->extent_list) )
        {
            mfn = page_to_mfn(page);
            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    a->nr_done = i;
}

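/*
 * XENMEM_populate_physmap: like increase_reservation(), except that each
 * entry in a->extent_list supplies the GPFN at which the new extent
 * should appear.  Translated guests get a physmap entry per page; for
 * all other guests the M2P table is updated and the allocated MFN is
 * copied back to the caller's list.
 */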
static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;
    unsigned int cpu = select_local_cpu(d);

    if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
        return;

    if ( (a->extent_order != 0) &&
         !multipage_allocation_permitted(current->domain) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
        if ( unlikely(page == NULL) )
        {
            gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
                     a->extent_order, d->domain_id, a->memflags,
                     i, a->nr_extents);
            goto out;
        }

        mfn = page_to_mfn(page);

        if ( unlikely(shadow_mode_translate(d)) )
        {
            for ( j = 0; j < (1 << a->extent_order); j++ )
                guest_physmap_add_page(d, gpfn + j, mfn + j);
        }
        else
        {
            for ( j = 0; j < (1 << a->extent_order); j++ )
                set_gpfn_from_mfn(mfn + j, gpfn + j);

            /* Inform the domain of the new page's machine address. */
            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    a->nr_done = i;
}

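/*
 * Release the page at guest frame gmfn from domain d: drop its pinned
 * and allocated references if held, then remove it from the physmap.
 * Returns 1 on success, 0 if the frame or its backing page is bad.
 */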
int guest_remove_page(struct domain *d, unsigned long gmfn)
{
    struct page_info *page;
    unsigned long mfn;

    mfn = gmfn_to_mfn(d, gmfn);
    if ( unlikely(!mfn_valid(mfn)) )
    {
        gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
                 d->domain_id, gmfn);
        return 0;
    }

    page = mfn_to_page(mfn);
    if ( unlikely(!get_page(page, d)) )
    {
        gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
        return 0;
    }

    if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
        put_page_and_type(page);

    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
        put_page(page);

    if ( unlikely(!page_is_removable(page)) )
    {
        shadow_drop_references(d, page);
        /* We'll make this a guest-visible error in future, so take heed! */
        if ( !page_is_removable(page) )
            gdprintk(XENLOG_INFO, "Dom%d freeing in-use page %lx "
                     "(pseudophys %lx): count=%lx type=%lx\n",
                     d->domain_id, mfn, get_gpfn_from_mfn(mfn),
                     (unsigned long)page->count_info, page->u.inuse.type_info);
    }

    guest_physmap_remove_page(d, gmfn, mfn);

    put_page(page);

    return 1;
}

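/*
 * XENMEM_decrease_reservation: return extents to the heap, one page at a
 * time via guest_remove_page().  Base GPFNs are read from a->extent_list;
 * preemption is handled exactly as in the allocation paths above.
 */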
static void decrease_reservation(struct memop_args *a)
{
    unsigned long i, j;
    xen_pfn_t gmfn;

    if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
            goto out;

        for ( j = 0; j < (1 << a->extent_order); j++ )
            if ( !guest_remove_page(a->domain, gmfn + j) )
                goto out;
    }

 out:
    a->nr_done = i;
}

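/*
 * XENMEM_translate_gpfn_list: convert a guest-supplied list of GPFNs into
 * MFNs.  Only valid for translated (shadow-mode) domains; a privileged
 * caller may translate on behalf of another domain.  Resumable via
 * *progress and the -EAGAIN return.
 */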
static long translate_gpfn_list(
    XEN_GUEST_HANDLE(xen_translate_gpfn_list_t) uop, unsigned long *progress)
{
    struct xen_translate_gpfn_list op;
    unsigned long i;
    xen_pfn_t gpfn;
    xen_pfn_t mfn;
    struct domain *d;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    /* Is size too large for us to encode a continuation? */
    if ( op.nr_gpfns > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
        return -EINVAL;

    if ( !guest_handle_okay(op.gpfn_list, op.nr_gpfns) ||
         !guest_handle_okay(op.mfn_list, op.nr_gpfns) )
        return -EFAULT;

    if ( op.domid == DOMID_SELF )
        op.domid = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = get_domain_by_id(op.domid)) == NULL )
        return -ESRCH;

    if ( !shadow_mode_translate(d) )
    {
        put_domain(d);
        return -EINVAL;
    }

    for ( i = *progress; i < op.nr_gpfns; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            put_domain(d);
            *progress = i;
            return -EAGAIN;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, op.gpfn_list, i, 1)) )
        {
            put_domain(d);
            return -EFAULT;
        }

        mfn = gmfn_to_mfn(d, gpfn);

        if ( unlikely(__copy_to_guest_offset(op.mfn_list, i, &mfn, 1)) )
        {
            put_domain(d);
            return -EFAULT;
        }
    }

    put_domain(d);
    return 0;
}

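/*
 * XENMEM_exchange: trade one set of a domain's extents for a second set
 * of equal total size but possibly different contiguity or addressability
 * (e.g. to obtain machine-contiguous buffers, or memory below a DMA
 * limit).  Work proceeds a "chunk" at a time, a chunk pairing 2^n input
 * extents with 2^m output extents of identical total size, so that each
 * iteration either completes or can be rolled back in full.
 */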
static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
{
    struct xen_memory_exchange exch;
    LIST_HEAD(in_chunk_list);
    LIST_HEAD(out_chunk_list);
    unsigned long in_chunk_order, out_chunk_order;
    xen_pfn_t     gpfn, gmfn, mfn;
    unsigned long i, j, k;
    unsigned int  memflags = 0, cpu;
    long          rc = 0;
    struct domain *d;
    struct page_info *page;

    if ( copy_from_guest(&exch, arg, 1) )
        return -EFAULT;

    /* Various sanity checks. */
    if ( (exch.nr_exchanged > exch.in.nr_extents) ||
         /* Input and output domain identifiers match? */
         (exch.in.domid != exch.out.domid) ||
         /* Sizes of input and output lists do not overflow a long? */
         ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
         ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||
         /* Sizes of input and output lists match? */
         ((exch.in.nr_extents << exch.in.extent_order) !=
          (exch.out.nr_extents << exch.out.extent_order)) )
    {
        rc = -EINVAL;
        goto fail_early;
    }

    /* Only privileged guests can allocate multi-page contiguous extents. */
    if ( ((exch.in.extent_order != 0) || (exch.out.extent_order != 0)) &&
         !multipage_allocation_permitted(current->domain) )
    {
        rc = -EPERM;
        goto fail_early;
    }

    if ( (exch.out.address_bits != 0) &&
         (exch.out.address_bits <
          (get_order_from_pages(max_page) + PAGE_SHIFT)) )
    {
        if ( exch.out.address_bits < dma_bitsize )
        {
            rc = -ENOMEM;
            goto fail_early;
        }
        memflags = MEMF_dma;
    }

    if ( exch.in.extent_order <= exch.out.extent_order )
    {
        in_chunk_order  = exch.out.extent_order - exch.in.extent_order;
        out_chunk_order = 0;
    }
    else
    {
        in_chunk_order  = 0;
        out_chunk_order = exch.in.extent_order - exch.out.extent_order;
    }

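    /*
     * Example: with 4kB pages, exchanging single-page (order 0) input
     * extents for 2MB (order 9) output extents gives in_chunk_order = 9
     * and out_chunk_order = 0: each chunk consumes 512 input pages and
     * produces one 512-page output extent of the same total size.
     */
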
    /*
     * Only support exchange on calling domain right now. Otherwise there are
     * tricky corner cases to consider (e.g., DOMF_dying domain).
     */
    if ( unlikely(exch.in.domid != DOMID_SELF) )
    {
        rc = IS_PRIV(current->domain) ? -EINVAL : -EPERM;
        goto fail_early;
    }
    d = current->domain;

    cpu = select_local_cpu(d);

    for ( i = (exch.nr_exchanged >> in_chunk_order);
          i < (exch.in.nr_extents >> in_chunk_order);
          i++ )
    {
        if ( hypercall_preempt_check() )
        {
            exch.nr_exchanged = i << in_chunk_order;
            if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
                return -EFAULT;
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg);
        }

        /* Steal a chunk's worth of input pages from the domain. */
        for ( j = 0; j < (1UL << in_chunk_order); j++ )
        {
            if ( unlikely(__copy_from_guest_offset(
                &gmfn, exch.in.extent_start, (i<<in_chunk_order)+j, 1)) )
            {
                rc = -EFAULT;
                goto fail;
            }

            for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
            {
                mfn = gmfn_to_mfn(d, gmfn + k);
                if ( unlikely(!mfn_valid(mfn)) )
                {
                    rc = -EINVAL;
                    goto fail;
                }

                page = mfn_to_page(mfn);

                if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
                {
                    rc = -EINVAL;
                    goto fail;
                }

                list_add(&page->list, &in_chunk_list);
            }
        }

        /* Allocate a chunk's worth of anonymous output pages. */
        for ( j = 0; j < (1UL << out_chunk_order); j++ )
        {
            page = __alloc_domheap_pages(
                NULL, cpu, exch.out.extent_order, memflags);
            if ( unlikely(page == NULL) )
            {
                rc = -ENOMEM;
                goto fail;
            }

            list_add(&page->list, &out_chunk_list);
        }

        /*
         * Success! Beyond this point we cannot fail for this chunk.
         */

        /* Destroy final reference to each input page. */
        while ( !list_empty(&in_chunk_list) )
        {
            page = list_entry(in_chunk_list.next, struct page_info, list);
            list_del(&page->list);
            if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
                BUG();
            mfn = page_to_mfn(page);
            guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn);
            put_page(page);
        }

        /* Assign each output page to the domain. */
        j = 0;
        while ( !list_empty(&out_chunk_list) )
        {
            page = list_entry(out_chunk_list.next, struct page_info, list);
            list_del(&page->list);
            if ( assign_pages(d, page, exch.out.extent_order,
                              MEMF_no_refcount) )
                BUG();

            /* Note that we ignore errors accessing the output extent list. */
            (void)__copy_from_guest_offset(
                &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);

            mfn = page_to_mfn(page);
            if ( unlikely(shadow_mode_translate(d)) )
            {
                for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
                    guest_physmap_add_page(d, gpfn + k, mfn + k);
            }
            else
            {
                for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
                    set_gpfn_from_mfn(mfn + k, gpfn + k);
                (void)__copy_to_guest_offset(
                    exch.out.extent_start, (i<<out_chunk_order)+j, &mfn, 1);
            }

            j++;
        }
        BUG_ON(j != (1UL << out_chunk_order));
    }

    exch.nr_exchanged = exch.in.nr_extents;
    if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
        rc = -EFAULT;
    return rc;

    /*
     * Failed a chunk! Free any partial chunk work. Tell caller how many
     * chunks succeeded.
     */
 fail:
    /* Reassign any input pages we managed to steal. */
    while ( !list_empty(&in_chunk_list) )
    {
        page = list_entry(in_chunk_list.next, struct page_info, list);
        list_del(&page->list);
        if ( assign_pages(d, page, 0, MEMF_no_refcount) )
            BUG();
    }

    /* Free any output pages we managed to allocate. */
    while ( !list_empty(&out_chunk_list) )
    {
        page = list_entry(out_chunk_list.next, struct page_info, list);
        list_del(&page->list);
        free_domheap_pages(page, exch.out.extent_order);
    }

    exch.nr_exchanged = i << in_chunk_order;

 fail_early:
    if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
        rc = -EFAULT;
    return rc;
}

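/*
 * Top-level handler for the memory_op hypercall.  The low bits of cmd
 * select the operation (MEMOP_CMD_MASK); the remaining high bits carry
 * the resume point (start extent, or translation progress) when the
 * hypercall is re-entered from a continuation.
 */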
long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d;
    int rc, op;
    unsigned long start_extent, progress;
    struct xen_memory_reservation reservation;
    struct memop_args args;
    domid_t domid;

    op = cmd & MEMOP_CMD_MASK;

    switch ( op )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        start_extent = cmd >> MEMOP_EXTENT_SHIFT;

        if ( copy_from_guest(&reservation, arg, 1) )
            return start_extent;

        /* Is size too large for us to encode a continuation? */
        if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
            return start_extent;

        if ( unlikely(start_extent > reservation.nr_extents) )
            return start_extent;

        args.extent_list  = reservation.extent_start;
        args.nr_extents   = reservation.nr_extents;
        args.extent_order = reservation.extent_order;
        args.nr_done      = start_extent;
        args.preempted    = 0;
        args.memflags     = 0;

        if ( (reservation.address_bits != 0) &&
             (reservation.address_bits <
              (get_order_from_pages(max_page) + PAGE_SHIFT)) )
        {
            if ( reservation.address_bits < dma_bitsize )
                return start_extent;
            args.memflags = MEMF_dma;
        }

        if ( likely(reservation.domid == DOMID_SELF) )
            d = current->domain;
        else if ( !IS_PRIV(current->domain) ||
                  ((d = get_domain_by_id(reservation.domid)) == NULL) )
            return start_extent;
        args.domain = d;

        switch ( op )
        {
        case XENMEM_increase_reservation:
            increase_reservation(&args);
            break;
        case XENMEM_decrease_reservation:
            decrease_reservation(&args);
            break;
        default: /* XENMEM_populate_physmap */
            populate_physmap(&args);
            break;
        }

        if ( unlikely(reservation.domid != DOMID_SELF) )
            put_domain(d);

        rc = args.nr_done;

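        /*
         * If preempted, re-encode the command with the number of extents
         * processed so far in its high bits and arrange for the hypercall
         * to be restarted from that point.
         */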
        if ( args.preempted )
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "lh",
                op | (rc << MEMOP_EXTENT_SHIFT), arg);

        break;

    case XENMEM_exchange:
        rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
        break;

    case XENMEM_maximum_ram_page:
        rc = max_page;
        break;

    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
        if ( copy_from_guest(&domid, arg, 1) )
            return -EFAULT;

        if ( likely(domid == DOMID_SELF) )
            d = current->domain;
        else if ( !IS_PRIV(current->domain) )
            return -EPERM;
        else if ( (d = get_domain_by_id(domid)) == NULL )
            return -ESRCH;

        rc = (op == XENMEM_current_reservation) ? d->tot_pages : d->max_pages;

        if ( unlikely(domid != DOMID_SELF) )
            put_domain(d);

        break;

    case XENMEM_translate_gpfn_list:
        progress = cmd >> MEMOP_EXTENT_SHIFT;
        rc = translate_gpfn_list(
            guest_handle_cast(arg, xen_translate_gpfn_list_t),
            &progress);
        if ( rc == -EAGAIN )
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "lh",
                op | (progress << MEMOP_EXTENT_SHIFT), arg);
        break;

    default:
        rc = arch_memory_op(op, arg);
        break;
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */