ia64/xen-unstable

annotate xen/common/memory.c @ 17860:6e92603ed9f2

Introduce guest_handle_subrange_okay() for checking a sub-section of an
argument array. This is needed where a compat shim splits up a 32-bit
guest's larger argument array and only the currently active part of the
translated array is contained within the compat_arg_xlat_area. (A
standalone sketch of the check follows the changeset metadata below.)

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 13 13:49:56 2008 +0100 (2008-06-13)
parents c684cf331f94
children 4bdc3de246c3
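
For orientation before the listing: the code below is a minimal, standalone C
analogue of the kind of sub-range check guest_handle_subrange_okay() performs.
The helper name subrange_okay(), its argument list, and the flat buffer-limit
arithmetic are illustrative assumptions for this sketch only; the real macro
operates on guest handles and per-architecture guest address limits, not a
simple limit address.

    /* Illustrative stand-alone analogue of a sub-range access check.
     * Assumption: guest_handle_subrange_okay() validates that elements
     * [first, last] of a guest array handle fall inside the guest-accessible
     * range; here that range is modelled by a plain [base, limit) window. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static int subrange_okay(uintptr_t base, size_t elem_size,
                             size_t first, size_t last, uintptr_t limit)
    {
        uintptr_t start, end;

        if ( first > last )                      /* empty or inverted range */
            return 0;
        if ( last > (SIZE_MAX - 1) / elem_size ) /* index arithmetic overflow */
            return 0;

        start = base + first * elem_size;        /* first byte we would touch */
        end   = base + (last + 1) * elem_size;   /* one past the last byte */

        return (start >= base) && (end >= start) && (end <= limit);
    }

    int main(void)
    {
        /* E.g. a compat shim handling extents [nr_done, nr_extents-1] of a
         * translated 32-bit argument array living in a bounded area. */
        uintptr_t xlat_base = 0x1000, xlat_limit = 0x2000;
        size_t nr_done = 16, nr_extents = 64;

        printf("%d\n", subrange_okay(xlat_base, sizeof(uint32_t),
                                     nr_done, nr_extents - 1, xlat_limit));
        return 0;
    }

In the listing that follows, the callers pass nr_done as the first index and
nr_extents-1 as the last, so only the portion of the array still to be
processed has to be addressable.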
rev   line source
kaf24@6486 1 /******************************************************************************
kaf24@6486 2 * memory.c
kaf24@6486 3 *
kaf24@6486 4 * Code to handle memory-related requests.
kaf24@6486 5 *
kaf24@6486 6 * Copyright (c) 2003-2004, B Dragovic
kaf24@6486 7 * Copyright (c) 2003-2005, K A Fraser
kaf24@6486 8 */
kaf24@6486 9
kaf24@6486 10 #include <xen/config.h>
kaf24@6486 11 #include <xen/types.h>
kaf24@6486 12 #include <xen/lib.h>
kaf24@6486 13 #include <xen/mm.h>
kaf24@6486 14 #include <xen/perfc.h>
kaf24@6486 15 #include <xen/sched.h>
kaf24@6486 16 #include <xen/event.h>
Tim@15635 17 #include <xen/paging.h>
kaf24@8468 18 #include <xen/iocap.h>
kaf24@9068 19 #include <xen/guest_access.h>
ack@13295 20 #include <xen/hypercall.h>
kaf24@11219 21 #include <xen/errno.h>
kaf24@6486 22 #include <asm/current.h>
kaf24@6486 23 #include <asm/hardirq.h>
keir@17385 24 #include <xen/numa.h>
kaf24@6486 25 #include <public/memory.h>
kfraser@15815 26 #include <xsm/xsm.h>
kaf24@6486 27
kfraser@12374 28 struct memop_args {
kfraser@12374 29 /* INPUT */
kfraser@12374 30 struct domain *domain; /* Domain to be affected. */
kfraser@12374 31 XEN_GUEST_HANDLE(xen_pfn_t) extent_list; /* List of extent base addrs. */
kfraser@12374 32 unsigned int nr_extents; /* Number of extents to allocate or free. */
kfraser@12374 33 unsigned int extent_order; /* Size of each extent. */
kfraser@12374 34 unsigned int memflags; /* Allocation flags. */
kfraser@12374 35
kfraser@12374 36 /* INPUT/OUTPUT */
kfraser@12374 37 unsigned int nr_done; /* Number of extents processed so far. */
kfraser@12374 38 int preempted; /* Was the hypercall preempted? */
kfraser@12374 39 };
kfraser@12374 40
kfraser@12374 41 static void increase_reservation(struct memop_args *a)
kaf24@6486 42 {
kaf24@8726 43 struct page_info *page;
kaf24@10314 44 unsigned long i;
kaf24@10314 45 xen_pfn_t mfn;
kfraser@12374 46 struct domain *d = a->domain;
keir@17385 47 unsigned int node = domain_to_node(d);
kaf24@6486 48
kfraser@12374 49 if ( !guest_handle_is_null(a->extent_list) &&
keir@17860 50 !guest_handle_subrange_okay(a->extent_list, a->nr_done,
keir@17860 51 a->nr_extents-1) )
kfraser@12374 52 return;
kaf24@6486 53
kfraser@12374 54 if ( (a->extent_order != 0) &&
kaf24@8468 55 !multipage_allocation_permitted(current->domain) )
kfraser@12374 56 return;
kaf24@6486 57
kfraser@12374 58 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@6486 59 {
kaf24@6486 60 if ( hypercall_preempt_check() )
kaf24@6607 61 {
kfraser@12374 62 a->preempted = 1;
kfraser@12374 63 goto out;
kaf24@6607 64 }
kaf24@6486 65
keir@17385 66 page = alloc_domheap_pages(
keir@17385 67 d, a->extent_order, a->memflags | MEMF_node(node));
kfraser@12374 68 if ( unlikely(page == NULL) )
kaf24@6486 69 {
kaf24@12038 70 gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
kfraser@10418 71 "id=%d memflags=%x (%ld of %d)\n",
kfraser@12374 72 a->extent_order, d->domain_id, a->memflags,
kfraser@12374 73 i, a->nr_extents);
kfraser@12374 74 goto out;
kaf24@6486 75 }
kaf24@6486 76
kaf24@6486 77 /* Inform the domain of the new page's machine address. */
kfraser@12374 78 if ( !guest_handle_is_null(a->extent_list) )
kaf24@8859 79 {
kaf24@8859 80 mfn = page_to_mfn(page);
kfraser@12374 81 if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
kfraser@12374 82 goto out;
kaf24@8859 83 }
kaf24@6486 84 }
kaf24@6486 85
kfraser@12374 86 out:
kfraser@12374 87 a->nr_done = i;
kaf24@6486 88 }
sos22@8688 89
kfraser@12374 90 static void populate_physmap(struct memop_args *a)
kaf24@8673 91 {
kaf24@8726 92 struct page_info *page;
kaf24@10314 93 unsigned long i, j;
kfraser@12374 94 xen_pfn_t gpfn, mfn;
kfraser@12374 95 struct domain *d = a->domain;
keir@17385 96 unsigned int node = domain_to_node(d);
kaf24@8673 97
keir@17860 98 if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
keir@17860 99 a->nr_extents-1) )
kfraser@12374 100 return;
kaf24@8673 101
kfraser@12374 102 if ( (a->extent_order != 0) &&
kaf24@8673 103 !multipage_allocation_permitted(current->domain) )
kfraser@12374 104 return;
kaf24@8673 105
kfraser@12374 106 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@8673 107 {
kaf24@8673 108 if ( hypercall_preempt_check() )
kaf24@8673 109 {
kfraser@12374 110 a->preempted = 1;
sos22@8688 111 goto out;
kaf24@8673 112 }
kaf24@8673 113
kfraser@12374 114 if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
kaf24@8859 115 goto out;
kaf24@8859 116
keir@17385 117 page = alloc_domheap_pages(
keir@17385 118 d, a->extent_order, a->memflags | MEMF_node(node));
kfraser@12374 119 if ( unlikely(page == NULL) )
kaf24@8673 120 {
kaf24@12038 121 gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
kfraser@12374 122 "id=%d memflags=%x (%ld of %d)\n",
kfraser@12374 123 a->extent_order, d->domain_id, a->memflags,
kfraser@12374 124 i, a->nr_extents);
sos22@8688 125 goto out;
kaf24@8673 126 }
kaf24@8673 127
kaf24@8726 128 mfn = page_to_mfn(page);
keir@17727 129 guest_physmap_add_page(d, gpfn, mfn, a->extent_order);
kaf24@8673 130
keir@17727 131 if ( !paging_mode_translate(d) )
kaf24@8694 132 {
kfraser@12374 133 for ( j = 0; j < (1 << a->extent_order); j++ )
kaf24@8736 134 set_gpfn_from_mfn(mfn + j, gpfn + j);
kaf24@8673 135
sos22@8688 136 /* Inform the domain of the new page's machine address. */
kfraser@12374 137 if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
sos22@8688 138 goto out;
sos22@8688 139 }
kaf24@8673 140 }
kaf24@8673 141
sos22@8688 142 out:
kfraser@12374 143 a->nr_done = i;
kaf24@8673 144 }
cl349@9211 145
kfraser@12374 146 int guest_remove_page(struct domain *d, unsigned long gmfn)
cl349@9211 147 {
cl349@9211 148 struct page_info *page;
cl349@9211 149 unsigned long mfn;
cl349@9211 150
cl349@9211 151 mfn = gmfn_to_mfn(d, gmfn);
cl349@9211 152 if ( unlikely(!mfn_valid(mfn)) )
cl349@9211 153 {
kaf24@12038 154 gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
tdeegan@11172 155 d->domain_id, gmfn);
cl349@9211 156 return 0;
cl349@9211 157 }
cl349@9211 158
cl349@9211 159 page = mfn_to_page(mfn);
cl349@9211 160 if ( unlikely(!get_page(page, d)) )
cl349@9211 161 {
kaf24@12038 162 gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
cl349@9211 163 return 0;
cl349@9211 164 }
cl349@9211 165
cl349@9211 166 if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
cl349@9211 167 put_page_and_type(page);
cl349@9211 168
cl349@9211 169 if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
cl349@9211 170 put_page(page);
cl349@9211 171
keir@17664 172 guest_physmap_remove_page(d, gmfn, mfn, 0);
cl349@9211 173
cl349@9211 174 put_page(page);
cl349@9211 175
cl349@9211 176 return 1;
cl349@9211 177 }
cl349@9211 178
kfraser@12374 179 static void decrease_reservation(struct memop_args *a)
kaf24@6486 180 {
kaf24@10314 181 unsigned long i, j;
kaf24@10314 182 xen_pfn_t gmfn;
kaf24@6486 183
keir@17860 184 if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
keir@17860 185 a->nr_extents-1) )
kfraser@12374 186 return;
kaf24@6486 187
kfraser@12374 188 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@6486 189 {
kaf24@6486 190 if ( hypercall_preempt_check() )
kaf24@6607 191 {
kfraser@12374 192 a->preempted = 1;
kfraser@12374 193 goto out;
kaf24@6607 194 }
kaf24@6486 195
kfraser@12374 196 if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
kfraser@12374 197 goto out;
kaf24@6486 198
kfraser@12374 199 for ( j = 0; j < (1 << a->extent_order); j++ )
kfraser@12374 200 if ( !guest_remove_page(a->domain, gmfn + j) )
kfraser@12374 201 goto out;
kaf24@6486 202 }
kaf24@6486 203
kfraser@12374 204 out:
kfraser@12374 205 a->nr_done = i;
kaf24@6486 206 }
kaf24@6486 207
kfraser@12374 208 static long translate_gpfn_list(
kaf24@9873 209 XEN_GUEST_HANDLE(xen_translate_gpfn_list_t) uop, unsigned long *progress)
kaf24@8871 210 {
kaf24@8871 211 struct xen_translate_gpfn_list op;
kaf24@10314 212 unsigned long i;
kaf24@10314 213 xen_pfn_t gpfn;
kaf24@10314 214 xen_pfn_t mfn;
kaf24@8871 215 struct domain *d;
kfraser@15815 216 int rc;
kaf24@6486 217
kaf24@9068 218 if ( copy_from_guest(&op, uop, 1) )
kaf24@8871 219 return -EFAULT;
kaf24@8871 220
kaf24@8871 221 /* Is size too large for us to encode a continuation? */
ack@13295 222 if ( op.nr_gpfns > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
kaf24@8871 223 return -EINVAL;
kaf24@8871 224
keir@17860 225 if ( !guest_handle_subrange_okay(op.gpfn_list, *progress, op.nr_gpfns-1) ||
keir@17860 226 !guest_handle_subrange_okay(op.mfn_list, *progress, op.nr_gpfns-1) )
kaf24@8871 227 return -EFAULT;
kaf24@8871 228
kaf24@8871 229 if ( op.domid == DOMID_SELF )
keir@17349 230 {
keir@17349 231 d = rcu_lock_current_domain();
keir@17349 232 }
keir@17349 233 else
keir@17349 234 {
keir@17349 235 if ( (d = rcu_lock_domain_by_id(op.domid)) == NULL )
keir@16856 236 return -ESRCH;
keir@17349 237 if ( !IS_PRIV_FOR(current->domain, d) )
keir@17349 238 {
keir@16856 239 rcu_unlock_domain(d);
keir@16856 240 return -EPERM;
keir@16856 241 }
keir@16856 242 }
kaf24@8871 243
kaf24@8871 244
Tim@15635 245 if ( !paging_mode_translate(d) )
kaf24@8871 246 {
kfraser@14192 247 rcu_unlock_domain(d);
kaf24@8871 248 return -EINVAL;
kaf24@8871 249 }
kaf24@8871 250
kaf24@8871 251 for ( i = *progress; i < op.nr_gpfns; i++ )
kaf24@8871 252 {
kaf24@8871 253 if ( hypercall_preempt_check() )
kaf24@8871 254 {
kfraser@14192 255 rcu_unlock_domain(d);
kaf24@8871 256 *progress = i;
kaf24@8871 257 return -EAGAIN;
kaf24@8871 258 }
kaf24@8871 259
kaf24@9068 260 if ( unlikely(__copy_from_guest_offset(&gpfn, op.gpfn_list, i, 1)) )
kaf24@8871 261 {
kfraser@14192 262 rcu_unlock_domain(d);
kaf24@8871 263 return -EFAULT;
kaf24@8871 264 }
kaf24@8871 265
kaf24@8871 266 mfn = gmfn_to_mfn(d, gpfn);
kaf24@8871 267
kfraser@15815 268 rc = xsm_translate_gpfn_list(current->domain, mfn);
kfraser@15815 269 if ( rc )
kfraser@15815 270 {
kfraser@15815 271 rcu_unlock_domain(d);
kfraser@15815 272 return rc;
kfraser@15815 273 }
kfraser@15815 274
kaf24@9068 275 if ( unlikely(__copy_to_guest_offset(op.mfn_list, i, &mfn, 1)) )
kaf24@8871 276 {
kfraser@14192 277 rcu_unlock_domain(d);
kaf24@8871 278 return -EFAULT;
kaf24@8871 279 }
kaf24@8871 280 }
kaf24@8871 281
kfraser@14192 282 rcu_unlock_domain(d);
kaf24@8871 283 return 0;
kaf24@8871 284 }
kaf24@8871 285
kfraser@12374 286 static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
kfraser@10418 287 {
kfraser@10418 288 struct xen_memory_exchange exch;
kfraser@10418 289 LIST_HEAD(in_chunk_list);
kfraser@10418 290 LIST_HEAD(out_chunk_list);
kfraser@10418 291 unsigned long in_chunk_order, out_chunk_order;
kaf24@10459 292 xen_pfn_t gpfn, gmfn, mfn;
kfraser@10418 293 unsigned long i, j, k;
keir@17385 294 unsigned int memflags = 0;
kfraser@10418 295 long rc = 0;
kfraser@10418 296 struct domain *d;
kfraser@10418 297 struct page_info *page;
kfraser@10418 298
kfraser@10418 299 if ( copy_from_guest(&exch, arg, 1) )
kfraser@10418 300 return -EFAULT;
kfraser@10418 301
kfraser@10418 302 /* Various sanity checks. */
kfraser@10418 303 if ( (exch.nr_exchanged > exch.in.nr_extents) ||
kfraser@10418 304 /* Input and output domain identifiers match? */
kfraser@10418 305 (exch.in.domid != exch.out.domid) ||
kfraser@10418 306 /* Sizes of input and output lists do not overflow a long? */
kfraser@10418 307 ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
kfraser@10418 308 ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||
kfraser@10418 309 /* Sizes of input and output lists match? */
kfraser@10418 310 ((exch.in.nr_extents << exch.in.extent_order) !=
kfraser@10418 311 (exch.out.nr_extents << exch.out.extent_order)) )
kfraser@10418 312 {
kfraser@10418 313 rc = -EINVAL;
kfraser@10418 314 goto fail_early;
kfraser@10418 315 }
kfraser@10418 316
kfraser@10418 317 /* Only privileged guests can allocate multi-page contiguous extents. */
kfraser@10418 318 if ( ((exch.in.extent_order != 0) || (exch.out.extent_order != 0)) &&
kfraser@10418 319 !multipage_allocation_permitted(current->domain) )
kfraser@10418 320 {
kfraser@10418 321 rc = -EPERM;
kfraser@10418 322 goto fail_early;
kfraser@10418 323 }
kfraser@10418 324
kfraser@10418 325 if ( exch.in.extent_order <= exch.out.extent_order )
kfraser@10418 326 {
kfraser@10418 327 in_chunk_order = exch.out.extent_order - exch.in.extent_order;
kfraser@10418 328 out_chunk_order = 0;
kfraser@10418 329 }
kfraser@10418 330 else
kfraser@10418 331 {
kfraser@10418 332 in_chunk_order = 0;
kfraser@10418 333 out_chunk_order = exch.in.extent_order - exch.out.extent_order;
kfraser@10418 334 }
kfraser@10418 335
kfraser@10418 336 /*
kfraser@10418 337 * Only support exchange on calling domain right now. Otherwise there are
kfraser@14642 338 * tricky corner cases to consider (e.g., dying domain).
kfraser@10418 339 */
kfraser@10418 340 if ( unlikely(exch.in.domid != DOMID_SELF) )
kfraser@10418 341 {
kfraser@10418 342 rc = IS_PRIV(current->domain) ? -EINVAL : -EPERM;
kfraser@10418 343 goto fail_early;
kfraser@10418 344 }
kfraser@10418 345 d = current->domain;
kfraser@10418 346
keir@16548 347 memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
keir@16549 348 d, exch.out.address_bits ? : (BITS_PER_LONG+PAGE_SHIFT)));
keir@17385 349 memflags |= MEMF_node(domain_to_node(d));
kfraser@11973 350
kfraser@12374 351 for ( i = (exch.nr_exchanged >> in_chunk_order);
kfraser@12374 352 i < (exch.in.nr_extents >> in_chunk_order);
kfraser@12374 353 i++ )
kfraser@10418 354 {
kfraser@10418 355 if ( hypercall_preempt_check() )
kfraser@10418 356 {
kfraser@12374 357 exch.nr_exchanged = i << in_chunk_order;
kfraser@10418 358 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 359 return -EFAULT;
kfraser@10418 360 return hypercall_create_continuation(
kfraser@10418 361 __HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg);
kfraser@10418 362 }
kfraser@10418 363
kfraser@10418 364 /* Steal a chunk's worth of input pages from the domain. */
kfraser@10418 365 for ( j = 0; j < (1UL << in_chunk_order); j++ )
kfraser@10418 366 {
kfraser@10418 367 if ( unlikely(__copy_from_guest_offset(
kfraser@10418 368 &gmfn, exch.in.extent_start, (i<<in_chunk_order)+j, 1)) )
kfraser@10418 369 {
kfraser@10418 370 rc = -EFAULT;
kfraser@10418 371 goto fail;
kfraser@10418 372 }
kfraser@10418 373
kfraser@10418 374 for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
kfraser@10418 375 {
kfraser@10418 376 mfn = gmfn_to_mfn(d, gmfn + k);
kfraser@10418 377 if ( unlikely(!mfn_valid(mfn)) )
kfraser@10418 378 {
kfraser@10418 379 rc = -EINVAL;
kfraser@10418 380 goto fail;
kfraser@10418 381 }
kfraser@10418 382
kfraser@10418 383 page = mfn_to_page(mfn);
kfraser@10418 384
kfraser@10418 385 if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
kfraser@10418 386 {
kfraser@10418 387 rc = -EINVAL;
kfraser@10418 388 goto fail;
kfraser@10418 389 }
kfraser@10418 390
kfraser@10418 391 list_add(&page->list, &in_chunk_list);
kfraser@10418 392 }
kfraser@10418 393 }
kfraser@10418 394
kfraser@10418 395 /* Allocate a chunk's worth of anonymous output pages. */
kfraser@10418 396 for ( j = 0; j < (1UL << out_chunk_order); j++ )
kfraser@10418 397 {
keir@17385 398 page = alloc_domheap_pages(NULL, exch.out.extent_order, memflags);
kfraser@10418 399 if ( unlikely(page == NULL) )
kfraser@10418 400 {
kfraser@10418 401 rc = -ENOMEM;
kfraser@10418 402 goto fail;
kfraser@10418 403 }
kfraser@10418 404
kfraser@10418 405 list_add(&page->list, &out_chunk_list);
kfraser@10418 406 }
kfraser@10418 407
kfraser@10418 408 /*
kfraser@10418 409 * Success! Beyond this point we cannot fail for this chunk.
kfraser@10418 410 */
kfraser@10418 411
kfraser@10418 412 /* Destroy final reference to each input page. */
kfraser@10418 413 while ( !list_empty(&in_chunk_list) )
kfraser@10418 414 {
kfraser@10418 415 page = list_entry(in_chunk_list.next, struct page_info, list);
kfraser@10418 416 list_del(&page->list);
kfraser@10418 417 if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
kfraser@10418 418 BUG();
kfraser@10418 419 mfn = page_to_mfn(page);
keir@17664 420 guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn, 0);
kfraser@10418 421 put_page(page);
kfraser@10418 422 }
kfraser@10418 423
kfraser@10418 424 /* Assign each output page to the domain. */
kfraser@10418 425 j = 0;
kfraser@10418 426 while ( !list_empty(&out_chunk_list) )
kfraser@10418 427 {
kfraser@10418 428 page = list_entry(out_chunk_list.next, struct page_info, list);
kfraser@10418 429 list_del(&page->list);
kfraser@10418 430 if ( assign_pages(d, page, exch.out.extent_order,
kfraser@10418 431 MEMF_no_refcount) )
kfraser@10418 432 BUG();
kfraser@10418 433
kfraser@10418 434 /* Note that we ignore errors accessing the output extent list. */
kfraser@10418 435 (void)__copy_from_guest_offset(
kfraser@10418 436 &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);
kfraser@10418 437
kfraser@10418 438 mfn = page_to_mfn(page);
keir@17727 439 guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
keir@17727 440
keir@17727 441 if ( !paging_mode_translate(d) )
kfraser@10418 442 {
kfraser@10418 443 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
kfraser@10418 444 set_gpfn_from_mfn(mfn + k, gpfn + k);
kfraser@10418 445 (void)__copy_to_guest_offset(
kfraser@10418 446 exch.out.extent_start, (i<<out_chunk_order)+j, &mfn, 1);
kfraser@10418 447 }
kfraser@10418 448
kfraser@10418 449 j++;
kfraser@10418 450 }
kfraser@10418 451 BUG_ON(j != (1UL << out_chunk_order));
kfraser@10418 452 }
kfraser@10418 453
kfraser@12374 454 exch.nr_exchanged = exch.in.nr_extents;
kfraser@10418 455 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 456 rc = -EFAULT;
kfraser@10418 457 return rc;
kfraser@10418 458
kfraser@10418 459 /*
kfraser@10418 460 * Failed a chunk! Free any partial chunk work. Tell caller how many
kfraser@10418 461 * chunks succeeded.
kfraser@10418 462 */
kfraser@10418 463 fail:
kfraser@10418 464 /* Reassign any input pages we managed to steal. */
kfraser@10418 465 while ( !list_empty(&in_chunk_list) )
kfraser@10418 466 {
kfraser@10418 467 page = list_entry(in_chunk_list.next, struct page_info, list);
kfraser@10418 468 list_del(&page->list);
kfraser@10418 469 if ( assign_pages(d, page, 0, MEMF_no_refcount) )
kfraser@10418 470 BUG();
kfraser@10418 471 }
kfraser@10418 472
kfraser@10418 473 /* Free any output pages we managed to allocate. */
kfraser@10418 474 while ( !list_empty(&out_chunk_list) )
kfraser@10418 475 {
kfraser@10418 476 page = list_entry(out_chunk_list.next, struct page_info, list);
kfraser@10418 477 list_del(&page->list);
kfraser@10418 478 free_domheap_pages(page, exch.out.extent_order);
kfraser@10418 479 }
kfraser@10418 480
kfraser@12374 481 exch.nr_exchanged = i << in_chunk_order;
kfraser@10418 482
kfraser@10418 483 fail_early:
kfraser@10418 484 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 485 rc = -EFAULT;
kfraser@10418 486 return rc;
kfraser@10418 487 }
kfraser@10418 488
kaf24@9873 489 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE(void) arg)
kaf24@6486 490 {
kaf24@6486 491 struct domain *d;
kfraser@12374 492 int rc, op;
kaf24@8871 493 unsigned long start_extent, progress;
kaf24@6486 494 struct xen_memory_reservation reservation;
kfraser@12374 495 struct memop_args args;
kaf24@7959 496 domid_t domid;
kaf24@6486 497
ack@13295 498 op = cmd & MEMOP_CMD_MASK;
kaf24@6486 499
kaf24@6486 500 switch ( op )
kaf24@6486 501 {
kaf24@6486 502 case XENMEM_increase_reservation:
kaf24@6486 503 case XENMEM_decrease_reservation:
kaf24@8673 504 case XENMEM_populate_physmap:
ack@13295 505 start_extent = cmd >> MEMOP_EXTENT_SHIFT;
kfraser@10418 506
kaf24@9068 507 if ( copy_from_guest(&reservation, arg, 1) )
kfraser@10418 508 return start_extent;
kaf24@6486 509
kaf24@8871 510 /* Is size too large for us to encode a continuation? */
ack@13295 511 if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
kfraser@10418 512 return start_extent;
kaf24@8871 513
kaf24@6486 514 if ( unlikely(start_extent > reservation.nr_extents) )
kfraser@10418 515 return start_extent;
kaf24@9068 516
kfraser@12374 517 args.extent_list = reservation.extent_start;
kfraser@12374 518 args.nr_extents = reservation.nr_extents;
kfraser@12374 519 args.extent_order = reservation.extent_order;
kfraser@12374 520 args.nr_done = start_extent;
kfraser@12374 521 args.preempted = 0;
kfraser@12374 522 args.memflags = 0;
kaf24@6486 523
kaf24@6701 524 if ( (reservation.address_bits != 0) &&
kaf24@6702 525 (reservation.address_bits <
kaf24@6702 526 (get_order_from_pages(max_page) + PAGE_SHIFT)) )
kaf24@6486 527 {
kfraser@14103 528 if ( reservation.address_bits <= PAGE_SHIFT )
kfraser@10418 529 return start_extent;
kfraser@14103 530 args.memflags = MEMF_bits(reservation.address_bits);
kaf24@6486 531 }
kaf24@6486 532
kaf24@6486 533 if ( likely(reservation.domid == DOMID_SELF) )
keir@17349 534 {
keir@17349 535 d = rcu_lock_current_domain();
keir@17349 536 }
keir@17349 537 else
keir@17349 538 {
keir@17349 539 if ( (d = rcu_lock_domain_by_id(reservation.domid)) == NULL )
keir@16856 540 return start_extent;
keir@17349 541 if ( !IS_PRIV_FOR(current->domain, d) )
keir@17349 542 {
keir@16856 543 rcu_unlock_domain(d);
keir@16856 544 return start_extent;
keir@16856 545 }
keir@16856 546 }
kfraser@12374 547 args.domain = d;
kaf24@6486 548
kfraser@15815 549 rc = xsm_memory_adjust_reservation(current->domain, d);
kfraser@15815 550 if ( rc )
kfraser@15815 551 {
keir@17349 552 rcu_unlock_domain(d);
kfraser@15815 553 return rc;
kfraser@15815 554 }
kfraser@15815 555
kaf24@8673 556 switch ( op )
kaf24@8673 557 {
kaf24@8673 558 case XENMEM_increase_reservation:
kfraser@12374 559 increase_reservation(&args);
kaf24@8673 560 break;
kaf24@8673 561 case XENMEM_decrease_reservation:
kfraser@12374 562 decrease_reservation(&args);
kaf24@8673 563 break;
kfraser@12374 564 default: /* XENMEM_populate_physmap */
kfraser@12374 565 populate_physmap(&args);
kaf24@8673 566 break;
kaf24@8673 567 }
kaf24@6486 568
keir@17349 569 rcu_unlock_domain(d);
kaf24@6486 570
kfraser@12374 571 rc = args.nr_done;
kaf24@6486 572
kfraser@12374 573 if ( args.preempted )
kaf24@9068 574 return hypercall_create_continuation(
kaf24@9068 575 __HYPERVISOR_memory_op, "lh",
ack@13295 576 op | (rc << MEMOP_EXTENT_SHIFT), arg);
kaf24@6607 577
kaf24@6486 578 break;
kaf24@6486 579
kfraser@10418 580 case XENMEM_exchange:
kfraser@10418 581 rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
kfraser@10418 582 break;
kfraser@10418 583
kaf24@6486 584 case XENMEM_maximum_ram_page:
kaf24@7959 585 rc = max_page;
kaf24@7959 586 break;
kaf24@7959 587
kaf24@7959 588 case XENMEM_current_reservation:
kaf24@7959 589 case XENMEM_maximum_reservation:
kfraser@14471 590 case XENMEM_maximum_gpfn:
kaf24@9068 591 if ( copy_from_guest(&domid, arg, 1) )
kaf24@6486 592 return -EFAULT;
kaf24@7959 593
kaf24@9068 594 if ( likely(domid == DOMID_SELF) )
keir@17349 595 {
keir@17349 596 d = rcu_lock_current_domain();
keir@17349 597 }
keir@17349 598 else
keir@17349 599 {
keir@17349 600 if ( (d = rcu_lock_domain_by_id(domid)) == NULL )
keir@16856 601 return -ESRCH;
keir@17349 602 if ( !IS_PRIV_FOR(current->domain, d) )
keir@17349 603 {
keir@16856 604 rcu_unlock_domain(d);
keir@16856 605 return -EPERM;
keir@16856 606 }
keir@16856 607 }
kaf24@7959 608
kfraser@15815 609 rc = xsm_memory_stat_reservation(current->domain, d);
kfraser@15815 610 if ( rc )
kfraser@15815 611 {
keir@17349 612 rcu_unlock_domain(d);
kfraser@15815 613 return rc;
kfraser@15815 614 }
kfraser@15815 615
kfraser@14471 616 switch ( op )
kfraser@14471 617 {
kfraser@14471 618 case XENMEM_current_reservation:
kfraser@14471 619 rc = d->tot_pages;
kfraser@14471 620 break;
kfraser@14471 621 case XENMEM_maximum_reservation:
kfraser@14471 622 rc = d->max_pages;
kfraser@14471 623 break;
kfraser@14471 624 default:
kfraser@14471 625 ASSERT(op == XENMEM_maximum_gpfn);
kfraser@14471 626 rc = domain_get_maximum_gpfn(d);
kfraser@14471 627 break;
kfraser@14471 628 }
kaf24@7959 629
keir@17349 630 rcu_unlock_domain(d);
kaf24@7959 631
kaf24@6486 632 break;
kaf24@6486 633
kaf24@8871 634 case XENMEM_translate_gpfn_list:
ack@13295 635 progress = cmd >> MEMOP_EXTENT_SHIFT;
kaf24@9068 636 rc = translate_gpfn_list(
kaf24@9068 637 guest_handle_cast(arg, xen_translate_gpfn_list_t),
kaf24@9068 638 &progress);
kaf24@8871 639 if ( rc == -EAGAIN )
kaf24@9068 640 return hypercall_create_continuation(
kaf24@9068 641 __HYPERVISOR_memory_op, "lh",
ack@13295 642 op | (progress << MEMOP_EXTENT_SHIFT), arg);
kaf24@8871 643 break;
kaf24@8871 644
kaf24@6486 645 default:
kaf24@8059 646 rc = arch_memory_op(op, arg);
kaf24@6486 647 break;
kaf24@6486 648 }
kaf24@6486 649
kaf24@6486 650 return rc;
kaf24@6486 651 }
kaf24@6486 652
kaf24@6486 653 /*
kaf24@6486 654 * Local variables:
kaf24@6486 655 * mode: C
kaf24@6486 656 * c-set-style: "BSD"
kaf24@6486 657 * c-basic-offset: 4
kaf24@6486 658 * tab-width: 4
kaf24@6486 659 * indent-tabs-mode: nil
kaf24@6486 660 * End:
kaf24@6486 661 */