ia64/xen-unstable

annotate xen/common/memory.c @ 16856:cff4c8a1aa28

New XEN_DOMCTL_set_target
Stub domains (and probably other domain disaggregation elements too)
need to be able to tinker with another domain. This adds IS_PRIV_FOR,
which extends IS_PRIV by allowing a domain to hold privileges over a
given "target" domain. XEN_DOMCTL_set_target permits setting this
"target". A new 'target' configuration option makes the domain builder
use it.

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author:   Keir Fraser <keir.fraser@citrix.com>
date:     Wed Jan 23 13:21:44 2008 +0000 (2008-01-23)
parents:  baf90ee3c1da
children: 4e2e98c2098e
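
The IS_PRIV_FOR check introduced by this changeset shows up throughout the
listing below: translate_gpfn_list() and do_memory_op() now accept a foreign
domid whenever IS_PRIV_FOR(current->domain, d) holds, instead of requiring
DOMID_SELF or full IS_PRIV. A minimal sketch of the idea follows; the field
names ('is_privileged', 'target') and the macro body are assumptions for
illustration, not copied from the real xen/include/xen/sched.h, which may
define them differently.

    /*
     * Illustrative sketch only: 'domain_example', its fields, and the macro
     * below are hypothetical names, not the real Xen definitions.
     */
    struct domain_example {
        int is_privileged;             /* full control-domain privileges (dom0) */
        struct domain_example *target; /* domain this one may manage; set via
                                        * XEN_DOMCTL_set_target, NULL if unset */
    };

    /*
     * IS_PRIV_FOR-style test: 'd' may act on 'tgt' if it is fully privileged,
     * or if 'tgt' has been assigned to it as its single target domain.
     */
    #define EXAMPLE_IS_PRIV_FOR(d, tgt) \
        ((d)->is_privileged || ((d)->target != NULL && (d)->target == (tgt)))

On the toolstack side, the 'target' domain-config option mentioned in the
changeset description makes the domain builder issue XEN_DOMCTL_set_target for
the freshly built domain (e.g. a stub domain), so that checks like the ones in
the code below succeed for it without granting it full privileges.
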
rev   line source
kaf24@6486 1 /******************************************************************************
kaf24@6486 2 * memory.c
kaf24@6486 3 *
kaf24@6486 4 * Code to handle memory-related requests.
kaf24@6486 5 *
kaf24@6486 6 * Copyright (c) 2003-2004, B Dragovic
kaf24@6486 7 * Copyright (c) 2003-2005, K A Fraser
kaf24@6486 8 */
kaf24@6486 9
kaf24@6486 10 #include <xen/config.h>
kaf24@6486 11 #include <xen/types.h>
kaf24@6486 12 #include <xen/lib.h>
kaf24@6486 13 #include <xen/mm.h>
kaf24@6486 14 #include <xen/perfc.h>
kaf24@6486 15 #include <xen/sched.h>
kaf24@6486 16 #include <xen/event.h>
Tim@15635 17 #include <xen/paging.h>
kaf24@8468 18 #include <xen/iocap.h>
kaf24@9068 19 #include <xen/guest_access.h>
ack@13295 20 #include <xen/hypercall.h>
kaf24@11219 21 #include <xen/errno.h>
kaf24@6486 22 #include <asm/current.h>
kaf24@6486 23 #include <asm/hardirq.h>
kaf24@6486 24 #include <public/memory.h>
kfraser@15815 25 #include <xsm/xsm.h>
kaf24@6486 26
kfraser@12374 27 struct memop_args {
kfraser@12374 28 /* INPUT */
kfraser@12374 29 struct domain *domain; /* Domain to be affected. */
kfraser@12374 30 XEN_GUEST_HANDLE(xen_pfn_t) extent_list; /* List of extent base addrs. */
kfraser@12374 31 unsigned int nr_extents; /* Number of extents to allocate or free. */
kfraser@12374 32 unsigned int extent_order; /* Size of each extent. */
kfraser@12374 33 unsigned int memflags; /* Allocation flags. */
kfraser@12374 34
kfraser@12374 35 /* INPUT/OUTPUT */
kfraser@12374 36 unsigned int nr_done; /* Number of extents processed so far. */
kfraser@12374 37 int preempted; /* Was the hypercall preempted? */
kfraser@12374 38 };
kfraser@12374 39
kfraser@12374 40 static unsigned int select_local_cpu(struct domain *d)
kfraser@12374 41 {
kfraser@12374 42 struct vcpu *v = d->vcpu[0];
kfraser@12374 43 return (v ? v->processor : 0);
kfraser@12374 44 }
kfraser@12374 45
kfraser@12374 46 static void increase_reservation(struct memop_args *a)
kaf24@6486 47 {
kaf24@8726 48 struct page_info *page;
kaf24@10314 49 unsigned long i;
kaf24@10314 50 xen_pfn_t mfn;
kfraser@12374 51 struct domain *d = a->domain;
kfraser@12374 52 unsigned int cpu = select_local_cpu(d);
kaf24@6486 53
kfraser@12374 54 if ( !guest_handle_is_null(a->extent_list) &&
kfraser@12374 55 !guest_handle_okay(a->extent_list, a->nr_extents) )
kfraser@12374 56 return;
kaf24@6486 57
kfraser@12374 58 if ( (a->extent_order != 0) &&
kaf24@8468 59 !multipage_allocation_permitted(current->domain) )
kfraser@12374 60 return;
kaf24@6486 61
kfraser@12374 62 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@6486 63 {
kaf24@6486 64 if ( hypercall_preempt_check() )
kaf24@6607 65 {
kfraser@12374 66 a->preempted = 1;
kfraser@12374 67 goto out;
kaf24@6607 68 }
kaf24@6486 69
kfraser@12374 70 page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
kfraser@12374 71 if ( unlikely(page == NULL) )
kaf24@6486 72 {
kaf24@12038 73 gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
kfraser@10418 74 "id=%d memflags=%x (%ld of %d)\n",
kfraser@12374 75 a->extent_order, d->domain_id, a->memflags,
kfraser@12374 76 i, a->nr_extents);
kfraser@12374 77 goto out;
kaf24@6486 78 }
kaf24@6486 79
kaf24@6486 80 /* Inform the domain of the new page's machine address. */
kfraser@12374 81 if ( !guest_handle_is_null(a->extent_list) )
kaf24@8859 82 {
kaf24@8859 83 mfn = page_to_mfn(page);
kfraser@12374 84 if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
kfraser@12374 85 goto out;
kaf24@8859 86 }
kaf24@6486 87 }
kaf24@6486 88
kfraser@12374 89 out:
kfraser@12374 90 a->nr_done = i;
kaf24@6486 91 }
sos22@8688 92
kfraser@12374 93 static void populate_physmap(struct memop_args *a)
kaf24@8673 94 {
kaf24@8726 95 struct page_info *page;
kaf24@10314 96 unsigned long i, j;
kfraser@12374 97 xen_pfn_t gpfn, mfn;
kfraser@12374 98 struct domain *d = a->domain;
kfraser@12374 99 unsigned int cpu = select_local_cpu(d);
kaf24@8673 100
kfraser@12374 101 if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
kfraser@12374 102 return;
kaf24@8673 103
kfraser@12374 104 if ( (a->extent_order != 0) &&
kaf24@8673 105 !multipage_allocation_permitted(current->domain) )
kfraser@12374 106 return;
kaf24@8673 107
kfraser@12374 108 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@8673 109 {
kaf24@8673 110 if ( hypercall_preempt_check() )
kaf24@8673 111 {
kfraser@12374 112 a->preempted = 1;
sos22@8688 113 goto out;
kaf24@8673 114 }
kaf24@8673 115
kfraser@12374 116 if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
kaf24@8859 117 goto out;
kaf24@8859 118
kfraser@12374 119 page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
kfraser@12374 120 if ( unlikely(page == NULL) )
kaf24@8673 121 {
kaf24@12038 122 gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
kfraser@12374 123 "id=%d memflags=%x (%ld of %d)\n",
kfraser@12374 124 a->extent_order, d->domain_id, a->memflags,
kfraser@12374 125 i, a->nr_extents);
sos22@8688 126 goto out;
kaf24@8673 127 }
kaf24@8673 128
kaf24@8726 129 mfn = page_to_mfn(page);
kaf24@8673 130
Tim@15635 131 if ( unlikely(paging_mode_translate(d)) )
kaf24@8694 132 {
kfraser@12374 133 for ( j = 0; j < (1 << a->extent_order); j++ )
keir@16291 134 if ( guest_physmap_add_page(d, gpfn + j, mfn + j) )
keir@16291 135 goto out;
sos22@8688 136 }
kaf24@8694 137 else
kaf24@8694 138 {
kfraser@12374 139 for ( j = 0; j < (1 << a->extent_order); j++ )
kaf24@8736 140 set_gpfn_from_mfn(mfn + j, gpfn + j);
kaf24@8673 141
sos22@8688 142 /* Inform the domain of the new page's machine address. */
kfraser@12374 143 if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
sos22@8688 144 goto out;
sos22@8688 145 }
kaf24@8673 146 }
kaf24@8673 147
sos22@8688 148 out:
kfraser@12374 149 a->nr_done = i;
kaf24@8673 150 }
cl349@9211 151
kfraser@12374 152 int guest_remove_page(struct domain *d, unsigned long gmfn)
cl349@9211 153 {
cl349@9211 154 struct page_info *page;
cl349@9211 155 unsigned long mfn;
cl349@9211 156
cl349@9211 157 mfn = gmfn_to_mfn(d, gmfn);
cl349@9211 158 if ( unlikely(!mfn_valid(mfn)) )
cl349@9211 159 {
kaf24@12038 160 gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
tdeegan@11172 161 d->domain_id, gmfn);
cl349@9211 162 return 0;
cl349@9211 163 }
cl349@9211 164
cl349@9211 165 page = mfn_to_page(mfn);
cl349@9211 166 if ( unlikely(!get_page(page, d)) )
cl349@9211 167 {
kaf24@12038 168 gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
cl349@9211 169 return 0;
cl349@9211 170 }
cl349@9211 171
cl349@9211 172 if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
cl349@9211 173 put_page_and_type(page);
cl349@9211 174
cl349@9211 175 if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
cl349@9211 176 put_page(page);
cl349@9211 177
kfraser@11212 178 guest_physmap_remove_page(d, gmfn, mfn);
cl349@9211 179
cl349@9211 180 put_page(page);
cl349@9211 181
cl349@9211 182 return 1;
cl349@9211 183 }
cl349@9211 184
kfraser@12374 185 static void decrease_reservation(struct memop_args *a)
kaf24@6486 186 {
kaf24@10314 187 unsigned long i, j;
kaf24@10314 188 xen_pfn_t gmfn;
kaf24@6486 189
kfraser@12374 190 if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
kfraser@12374 191 return;
kaf24@6486 192
kfraser@12374 193 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@6486 194 {
kaf24@6486 195 if ( hypercall_preempt_check() )
kaf24@6607 196 {
kfraser@12374 197 a->preempted = 1;
kfraser@12374 198 goto out;
kaf24@6607 199 }
kaf24@6486 200
kfraser@12374 201 if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
kfraser@12374 202 goto out;
kaf24@6486 203
kfraser@12374 204 for ( j = 0; j < (1 << a->extent_order); j++ )
kfraser@12374 205 if ( !guest_remove_page(a->domain, gmfn + j) )
kfraser@12374 206 goto out;
kaf24@6486 207 }
kaf24@6486 208
kfraser@12374 209 out:
kfraser@12374 210 a->nr_done = i;
kaf24@6486 211 }
kaf24@6486 212
kfraser@12374 213 static long translate_gpfn_list(
kaf24@9873 214 XEN_GUEST_HANDLE(xen_translate_gpfn_list_t) uop, unsigned long *progress)
kaf24@8871 215 {
kaf24@8871 216 struct xen_translate_gpfn_list op;
kaf24@10314 217 unsigned long i;
kaf24@10314 218 xen_pfn_t gpfn;
kaf24@10314 219 xen_pfn_t mfn;
kaf24@8871 220 struct domain *d;
kfraser@15815 221 int rc;
kaf24@6486 222
kaf24@9068 223 if ( copy_from_guest(&op, uop, 1) )
kaf24@8871 224 return -EFAULT;
kaf24@8871 225
kaf24@8871 226 /* Is size too large for us to encode a continuation? */
ack@13295 227 if ( op.nr_gpfns > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
kaf24@8871 228 return -EINVAL;
kaf24@8871 229
kaf24@9068 230 if ( !guest_handle_okay(op.gpfn_list, op.nr_gpfns) ||
kaf24@9068 231 !guest_handle_okay(op.mfn_list, op.nr_gpfns) )
kaf24@8871 232 return -EFAULT;
kaf24@8871 233
kaf24@8871 234 if ( op.domid == DOMID_SELF )
keir@16856 235 d = current->domain;
keir@16856 236 else {
keir@16856 237 d = rcu_lock_domain_by_id(op.domid);
keir@16856 238 if ( d == NULL )
keir@16856 239 return -ESRCH;
keir@16856 240 if ( !IS_PRIV_FOR(current->domain, d) ) {
keir@16856 241 rcu_unlock_domain(d);
keir@16856 242 return -EPERM;
keir@16856 243 }
keir@16856 244 }
kaf24@8871 245
kaf24@8871 246
Tim@15635 247 if ( !paging_mode_translate(d) )
kaf24@8871 248 {
kfraser@14192 249 rcu_unlock_domain(d);
kaf24@8871 250 return -EINVAL;
kaf24@8871 251 }
kaf24@8871 252
kaf24@8871 253 for ( i = *progress; i < op.nr_gpfns; i++ )
kaf24@8871 254 {
kaf24@8871 255 if ( hypercall_preempt_check() )
kaf24@8871 256 {
kfraser@14192 257 rcu_unlock_domain(d);
kaf24@8871 258 *progress = i;
kaf24@8871 259 return -EAGAIN;
kaf24@8871 260 }
kaf24@8871 261
kaf24@9068 262 if ( unlikely(__copy_from_guest_offset(&gpfn, op.gpfn_list, i, 1)) )
kaf24@8871 263 {
kfraser@14192 264 rcu_unlock_domain(d);
kaf24@8871 265 return -EFAULT;
kaf24@8871 266 }
kaf24@8871 267
kaf24@8871 268 mfn = gmfn_to_mfn(d, gpfn);
kaf24@8871 269
kfraser@15815 270 rc = xsm_translate_gpfn_list(current->domain, mfn);
kfraser@15815 271 if ( rc )
kfraser@15815 272 {
kfraser@15815 273 rcu_unlock_domain(d);
kfraser@15815 274 return rc;
kfraser@15815 275 }
kfraser@15815 276
kaf24@9068 277 if ( unlikely(__copy_to_guest_offset(op.mfn_list, i, &mfn, 1)) )
kaf24@8871 278 {
kfraser@14192 279 rcu_unlock_domain(d);
kaf24@8871 280 return -EFAULT;
kaf24@8871 281 }
kaf24@8871 282 }
kaf24@8871 283
kfraser@14192 284 rcu_unlock_domain(d);
kaf24@8871 285 return 0;
kaf24@8871 286 }
kaf24@8871 287
kfraser@12374 288 static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
kfraser@10418 289 {
kfraser@10418 290 struct xen_memory_exchange exch;
kfraser@10418 291 LIST_HEAD(in_chunk_list);
kfraser@10418 292 LIST_HEAD(out_chunk_list);
kfraser@10418 293 unsigned long in_chunk_order, out_chunk_order;
kaf24@10459 294 xen_pfn_t gpfn, gmfn, mfn;
kfraser@10418 295 unsigned long i, j, k;
kfraser@11973 296 unsigned int memflags = 0, cpu;
kfraser@10418 297 long rc = 0;
kfraser@10418 298 struct domain *d;
kfraser@10418 299 struct page_info *page;
kfraser@10418 300
kfraser@10418 301 if ( copy_from_guest(&exch, arg, 1) )
kfraser@10418 302 return -EFAULT;
kfraser@10418 303
kfraser@10418 304 /* Various sanity checks. */
kfraser@10418 305 if ( (exch.nr_exchanged > exch.in.nr_extents) ||
kfraser@10418 306 /* Input and output domain identifiers match? */
kfraser@10418 307 (exch.in.domid != exch.out.domid) ||
kfraser@10418 308 /* Sizes of input and output lists do not overflow a long? */
kfraser@10418 309 ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
kfraser@10418 310 ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||
kfraser@10418 311 /* Sizes of input and output lists match? */
kfraser@10418 312 ((exch.in.nr_extents << exch.in.extent_order) !=
kfraser@10418 313 (exch.out.nr_extents << exch.out.extent_order)) )
kfraser@10418 314 {
kfraser@10418 315 rc = -EINVAL;
kfraser@10418 316 goto fail_early;
kfraser@10418 317 }
kfraser@10418 318
kfraser@10418 319 /* Only privileged guests can allocate multi-page contiguous extents. */
kfraser@10418 320 if ( ((exch.in.extent_order != 0) || (exch.out.extent_order != 0)) &&
kfraser@10418 321 !multipage_allocation_permitted(current->domain) )
kfraser@10418 322 {
kfraser@10418 323 rc = -EPERM;
kfraser@10418 324 goto fail_early;
kfraser@10418 325 }
kfraser@10418 326
kfraser@10418 327 if ( exch.in.extent_order <= exch.out.extent_order )
kfraser@10418 328 {
kfraser@10418 329 in_chunk_order = exch.out.extent_order - exch.in.extent_order;
kfraser@10418 330 out_chunk_order = 0;
kfraser@10418 331 }
kfraser@10418 332 else
kfraser@10418 333 {
kfraser@10418 334 in_chunk_order = 0;
kfraser@10418 335 out_chunk_order = exch.in.extent_order - exch.out.extent_order;
kfraser@10418 336 }
kfraser@10418 337
kfraser@10418 338 /*
kfraser@10418 339 * Only support exchange on calling domain right now. Otherwise there are
kfraser@14642 340 * tricky corner cases to consider (e.g., dying domain).
kfraser@10418 341 */
kfraser@10418 342 if ( unlikely(exch.in.domid != DOMID_SELF) )
kfraser@10418 343 {
kfraser@10418 344 rc = IS_PRIV(current->domain) ? -EINVAL : -EPERM;
kfraser@10418 345 goto fail_early;
kfraser@10418 346 }
kfraser@10418 347 d = current->domain;
kfraser@10418 348
keir@16548 349 memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
keir@16549 350 d, exch.out.address_bits ? : (BITS_PER_LONG+PAGE_SHIFT)));
keir@16548 351
kfraser@12374 352 cpu = select_local_cpu(d);
kfraser@11973 353
kfraser@12374 354 for ( i = (exch.nr_exchanged >> in_chunk_order);
kfraser@12374 355 i < (exch.in.nr_extents >> in_chunk_order);
kfraser@12374 356 i++ )
kfraser@10418 357 {
kfraser@10418 358 if ( hypercall_preempt_check() )
kfraser@10418 359 {
kfraser@12374 360 exch.nr_exchanged = i << in_chunk_order;
kfraser@10418 361 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 362 return -EFAULT;
kfraser@10418 363 return hypercall_create_continuation(
kfraser@10418 364 __HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg);
kfraser@10418 365 }
kfraser@10418 366
kfraser@10418 367 /* Steal a chunk's worth of input pages from the domain. */
kfraser@10418 368 for ( j = 0; j < (1UL << in_chunk_order); j++ )
kfraser@10418 369 {
kfraser@10418 370 if ( unlikely(__copy_from_guest_offset(
kfraser@10418 371 &gmfn, exch.in.extent_start, (i<<in_chunk_order)+j, 1)) )
kfraser@10418 372 {
kfraser@10418 373 rc = -EFAULT;
kfraser@10418 374 goto fail;
kfraser@10418 375 }
kfraser@10418 376
kfraser@10418 377 for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
kfraser@10418 378 {
kfraser@10418 379 mfn = gmfn_to_mfn(d, gmfn + k);
kfraser@10418 380 if ( unlikely(!mfn_valid(mfn)) )
kfraser@10418 381 {
kfraser@10418 382 rc = -EINVAL;
kfraser@10418 383 goto fail;
kfraser@10418 384 }
kfraser@10418 385
kfraser@10418 386 page = mfn_to_page(mfn);
kfraser@10418 387
kfraser@10418 388 if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
kfraser@10418 389 {
kfraser@10418 390 rc = -EINVAL;
kfraser@10418 391 goto fail;
kfraser@10418 392 }
kfraser@10418 393
kfraser@10418 394 list_add(&page->list, &in_chunk_list);
kfraser@10418 395 }
kfraser@10418 396 }
kfraser@10418 397
kfraser@10418 398 /* Allocate a chunk's worth of anonymous output pages. */
kfraser@10418 399 for ( j = 0; j < (1UL << out_chunk_order); j++ )
kfraser@10418 400 {
kfraser@12374 401 page = __alloc_domheap_pages(
kfraser@12374 402 NULL, cpu, exch.out.extent_order, memflags);
kfraser@10418 403 if ( unlikely(page == NULL) )
kfraser@10418 404 {
kfraser@10418 405 rc = -ENOMEM;
kfraser@10418 406 goto fail;
kfraser@10418 407 }
kfraser@10418 408
kfraser@10418 409 list_add(&page->list, &out_chunk_list);
kfraser@10418 410 }
kfraser@10418 411
kfraser@10418 412 /*
kfraser@10418 413 * Success! Beyond this point we cannot fail for this chunk.
kfraser@10418 414 */
kfraser@10418 415
kfraser@10418 416 /* Destroy final reference to each input page. */
kfraser@10418 417 while ( !list_empty(&in_chunk_list) )
kfraser@10418 418 {
kfraser@10418 419 page = list_entry(in_chunk_list.next, struct page_info, list);
kfraser@10418 420 list_del(&page->list);
kfraser@10418 421 if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
kfraser@10418 422 BUG();
kfraser@10418 423 mfn = page_to_mfn(page);
kfraser@10418 424 guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn);
kfraser@10418 425 put_page(page);
kfraser@10418 426 }
kfraser@10418 427
kfraser@10418 428 /* Assign each output page to the domain. */
kfraser@10418 429 j = 0;
kfraser@10418 430 while ( !list_empty(&out_chunk_list) )
kfraser@10418 431 {
kfraser@10418 432 page = list_entry(out_chunk_list.next, struct page_info, list);
kfraser@10418 433 list_del(&page->list);
kfraser@10418 434 if ( assign_pages(d, page, exch.out.extent_order,
kfraser@10418 435 MEMF_no_refcount) )
kfraser@10418 436 BUG();
kfraser@10418 437
kfraser@10418 438 /* Note that we ignore errors accessing the output extent list. */
kfraser@10418 439 (void)__copy_from_guest_offset(
kfraser@10418 440 &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);
kfraser@10418 441
kfraser@10418 442 mfn = page_to_mfn(page);
Tim@15635 443 if ( unlikely(paging_mode_translate(d)) )
kfraser@10418 444 {
keir@16291 445 /* Ignore failure here. There's nothing we can do. */
kfraser@10418 446 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
keir@16291 447 (void)guest_physmap_add_page(d, gpfn + k, mfn + k);
kfraser@10418 448 }
kfraser@10418 449 else
kfraser@10418 450 {
kfraser@10418 451 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
kfraser@10418 452 set_gpfn_from_mfn(mfn + k, gpfn + k);
kfraser@10418 453 (void)__copy_to_guest_offset(
kfraser@10418 454 exch.out.extent_start, (i<<out_chunk_order)+j, &mfn, 1);
kfraser@10418 455 }
kfraser@10418 456
kfraser@10418 457 j++;
kfraser@10418 458 }
kfraser@10418 459 BUG_ON(j != (1UL << out_chunk_order));
kfraser@10418 460 }
kfraser@10418 461
kfraser@12374 462 exch.nr_exchanged = exch.in.nr_extents;
kfraser@10418 463 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 464 rc = -EFAULT;
kfraser@10418 465 return rc;
kfraser@10418 466
kfraser@10418 467 /*
kfraser@10418 468 * Failed a chunk! Free any partial chunk work. Tell caller how many
kfraser@10418 469 * chunks succeeded.
kfraser@10418 470 */
kfraser@10418 471 fail:
kfraser@10418 472 /* Reassign any input pages we managed to steal. */
kfraser@10418 473 while ( !list_empty(&in_chunk_list) )
kfraser@10418 474 {
kfraser@10418 475 page = list_entry(in_chunk_list.next, struct page_info, list);
kfraser@10418 476 list_del(&page->list);
kfraser@10418 477 if ( assign_pages(d, page, 0, MEMF_no_refcount) )
kfraser@10418 478 BUG();
kfraser@10418 479 }
kfraser@10418 480
kfraser@10418 481 /* Free any output pages we managed to allocate. */
kfraser@10418 482 while ( !list_empty(&out_chunk_list) )
kfraser@10418 483 {
kfraser@10418 484 page = list_entry(out_chunk_list.next, struct page_info, list);
kfraser@10418 485 list_del(&page->list);
kfraser@10418 486 free_domheap_pages(page, exch.out.extent_order);
kfraser@10418 487 }
kfraser@10418 488
kfraser@12374 489 exch.nr_exchanged = i << in_chunk_order;
kfraser@10418 490
kfraser@10418 491 fail_early:
kfraser@10418 492 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 493 rc = -EFAULT;
kfraser@10418 494 return rc;
kfraser@10418 495 }
kfraser@10418 496
kaf24@9873 497 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE(void) arg)
kaf24@6486 498 {
kaf24@6486 499 struct domain *d;
kfraser@12374 500 int rc, op;
kaf24@8871 501 unsigned long start_extent, progress;
kaf24@6486 502 struct xen_memory_reservation reservation;
kfraser@12374 503 struct memop_args args;
kaf24@7959 504 domid_t domid;
kaf24@6486 505
ack@13295 506 op = cmd & MEMOP_CMD_MASK;
kaf24@6486 507
kaf24@6486 508 switch ( op )
kaf24@6486 509 {
kaf24@6486 510 case XENMEM_increase_reservation:
kaf24@6486 511 case XENMEM_decrease_reservation:
kaf24@8673 512 case XENMEM_populate_physmap:
ack@13295 513 start_extent = cmd >> MEMOP_EXTENT_SHIFT;
kfraser@10418 514
kaf24@9068 515 if ( copy_from_guest(&reservation, arg, 1) )
kfraser@10418 516 return start_extent;
kaf24@6486 517
kaf24@8871 518 /* Is size too large for us to encode a continuation? */
ack@13295 519 if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
kfraser@10418 520 return start_extent;
kaf24@8871 521
kaf24@6486 522 if ( unlikely(start_extent > reservation.nr_extents) )
kfraser@10418 523 return start_extent;
kaf24@9068 524
kfraser@12374 525 args.extent_list = reservation.extent_start;
kfraser@12374 526 args.nr_extents = reservation.nr_extents;
kfraser@12374 527 args.extent_order = reservation.extent_order;
kfraser@12374 528 args.nr_done = start_extent;
kfraser@12374 529 args.preempted = 0;
kfraser@12374 530 args.memflags = 0;
kaf24@6486 531
kaf24@6701 532 if ( (reservation.address_bits != 0) &&
kaf24@6702 533 (reservation.address_bits <
kaf24@6702 534 (get_order_from_pages(max_page) + PAGE_SHIFT)) )
kaf24@6486 535 {
kfraser@14103 536 if ( reservation.address_bits <= PAGE_SHIFT )
kfraser@10418 537 return start_extent;
kfraser@14103 538 args.memflags = MEMF_bits(reservation.address_bits);
kaf24@6486 539 }
kaf24@6486 540
kaf24@6486 541 if ( likely(reservation.domid == DOMID_SELF) )
kaf24@6486 542 d = current->domain;
keir@16856 543 else {
keir@16856 544 d = rcu_lock_domain_by_id(reservation.domid);
keir@16856 545 if ( d == NULL)
keir@16856 546 return start_extent;
keir@16856 547 if ( !IS_PRIV_FOR(current->domain, d) ) {
keir@16856 548 rcu_unlock_domain(d);
keir@16856 549 return start_extent;
keir@16856 550 }
keir@16856 551 }
kfraser@12374 552 args.domain = d;
kaf24@6486 553
kfraser@15815 554 rc = xsm_memory_adjust_reservation(current->domain, d);
kfraser@15815 555 if ( rc )
kfraser@15815 556 {
kfraser@15815 557 if ( reservation.domid != DOMID_SELF )
kfraser@15815 558 rcu_unlock_domain(d);
kfraser@15815 559 return rc;
kfraser@15815 560 }
kfraser@15815 561
kaf24@8673 562 switch ( op )
kaf24@8673 563 {
kaf24@8673 564 case XENMEM_increase_reservation:
kfraser@12374 565 increase_reservation(&args);
kaf24@8673 566 break;
kaf24@8673 567 case XENMEM_decrease_reservation:
kfraser@12374 568 decrease_reservation(&args);
kaf24@8673 569 break;
kfraser@12374 570 default: /* XENMEM_populate_physmap */
kfraser@12374 571 populate_physmap(&args);
kaf24@8673 572 break;
kaf24@8673 573 }
kaf24@6486 574
kaf24@6486 575 if ( unlikely(reservation.domid != DOMID_SELF) )
kfraser@14192 576 rcu_unlock_domain(d);
kaf24@6486 577
kfraser@12374 578 rc = args.nr_done;
kaf24@6486 579
kfraser@12374 580 if ( args.preempted )
kaf24@9068 581 return hypercall_create_continuation(
kaf24@9068 582 __HYPERVISOR_memory_op, "lh",
ack@13295 583 op | (rc << MEMOP_EXTENT_SHIFT), arg);
kaf24@6607 584
kaf24@6486 585 break;
kaf24@6486 586
kfraser@10418 587 case XENMEM_exchange:
kfraser@10418 588 rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
kfraser@10418 589 break;
kfraser@10418 590
kaf24@6486 591 case XENMEM_maximum_ram_page:
kaf24@7959 592 rc = max_page;
kaf24@7959 593 break;
kaf24@7959 594
kaf24@7959 595 case XENMEM_current_reservation:
kaf24@7959 596 case XENMEM_maximum_reservation:
kfraser@14471 597 case XENMEM_maximum_gpfn:
kaf24@9068 598 if ( copy_from_guest(&domid, arg, 1) )
kaf24@6486 599 return -EFAULT;
kaf24@7959 600
kaf24@9068 601 if ( likely(domid == DOMID_SELF) )
kaf24@7959 602 d = current->domain;
keir@16856 603 else {
keir@16856 604 d = rcu_lock_domain_by_id(domid);
keir@16856 605 if ( d == NULL )
keir@16856 606 return -ESRCH;
keir@16856 607 if ( !IS_PRIV_FOR(current->domain, d) ) {
keir@16856 608 rcu_unlock_domain(d);
keir@16856 609 return -EPERM;
keir@16856 610 }
keir@16856 611 }
kaf24@7959 612
kfraser@15815 613 rc = xsm_memory_stat_reservation(current->domain, d);
kfraser@15815 614 if ( rc )
kfraser@15815 615 {
kfraser@15815 616 if ( domid != DOMID_SELF )
kfraser@15815 617 rcu_unlock_domain(d);
kfraser@15815 618 return rc;
kfraser@15815 619 }
kfraser@15815 620
kfraser@14471 621 switch ( op )
kfraser@14471 622 {
kfraser@14471 623 case XENMEM_current_reservation:
kfraser@14471 624 rc = d->tot_pages;
kfraser@14471 625 break;
kfraser@14471 626 case XENMEM_maximum_reservation:
kfraser@14471 627 rc = d->max_pages;
kfraser@14471 628 break;
kfraser@14471 629 default:
kfraser@14471 630 ASSERT(op == XENMEM_maximum_gpfn);
kfraser@14471 631 rc = domain_get_maximum_gpfn(d);
kfraser@14471 632 break;
kfraser@14471 633 }
kaf24@7959 634
kaf24@7959 635 if ( unlikely(domid != DOMID_SELF) )
kfraser@14192 636 rcu_unlock_domain(d);
kaf24@7959 637
kaf24@6486 638 break;
kaf24@6486 639
kaf24@8871 640 case XENMEM_translate_gpfn_list:
ack@13295 641 progress = cmd >> MEMOP_EXTENT_SHIFT;
kaf24@9068 642 rc = translate_gpfn_list(
kaf24@9068 643 guest_handle_cast(arg, xen_translate_gpfn_list_t),
kaf24@9068 644 &progress);
kaf24@8871 645 if ( rc == -EAGAIN )
kaf24@9068 646 return hypercall_create_continuation(
kaf24@9068 647 __HYPERVISOR_memory_op, "lh",
ack@13295 648 op | (progress << MEMOP_EXTENT_SHIFT), arg);
kaf24@8871 649 break;
kaf24@8871 650
kaf24@6486 651 default:
kaf24@8059 652 rc = arch_memory_op(op, arg);
kaf24@6486 653 break;
kaf24@6486 654 }
kaf24@6486 655
kaf24@6486 656 return rc;
kaf24@6486 657 }
kaf24@6486 658
kaf24@6486 659 /*
kaf24@6486 660 * Local variables:
kaf24@6486 661 * mode: C
kaf24@6486 662 * c-set-style: "BSD"
kaf24@6486 663 * c-basic-offset: 4
kaf24@6486 664 * tab-width: 4
kaf24@6486 665 * indent-tabs-mode: nil
kaf24@6486 666 * End:
kaf24@6486 667 */