ia64/xen-unstable

annotate xen/common/memory.c @ 12374:5cdd4da17036

[XEN] memory_op hypercall does not use guest_handle_add_offset().

It was causing compatibility issues across architectures: on
x86 the effect of that call would not persist across a
continuation, whereas on ia64 and powerpc (both of which use
xencomm) it would. This patch sidesteps the whole issue.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Nov 10 14:22:17 2006 +0000 (2006-11-10)
parents 36679b74e24a
children a4ba47e9bc1f
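
For context, the scheme this changeset keeps relies on encoding the resume
point in the otherwise-unused high-order bits of the hypercall's cmd
argument (see START_EXTENT_SHIFT and do_memory_op() in the listing below)
rather than advancing the guest handle. A minimal, self-contained sketch of
that encode/decode pattern, simplified from the file and not part of
memory.c itself (the sub-op value 3 below is purely illustrative):

    #include <stdio.h>

    #define START_EXTENT_SHIFT 4   /* cmd bits [4:] carry the resume point */

    /* Split a continuation-encoded cmd into its sub-op and start_extent. */
    static void decode_cmd(unsigned long cmd,
                           unsigned int *op, unsigned long *start_extent)
    {
        *op           = cmd & ((1 << START_EXTENT_SHIFT) - 1);
        *start_extent = cmd >> START_EXTENT_SHIFT;
    }

    /* Re-encode cmd for a continuation that resumes after 'done' extents. */
    static unsigned long encode_cmd(unsigned int op, unsigned long done)
    {
        return op | (done << START_EXTENT_SHIFT);
    }

    int main(void)
    {
        unsigned int op;
        unsigned long start;

        decode_cmd(encode_cmd(3 /* illustrative sub-op */, 128), &op, &start);
        printf("op=%u start_extent=%lu\n", op, start);  /* op=3 start_extent=128 */
        return 0;
    }

Because the resume point travels in cmd rather than in a modified guest
handle, a continuation behaves the same whether handles are raw pointers
(x86) or xencomm descriptors.
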
rev   line source
kaf24@6486 1 /******************************************************************************
kaf24@6486 2 * memory.c
kaf24@6486 3 *
kaf24@6486 4 * Code to handle memory-related requests.
kaf24@6486 5 *
kaf24@6486 6 * Copyright (c) 2003-2004, B Dragovic
kaf24@6486 7 * Copyright (c) 2003-2005, K A Fraser
kaf24@6486 8 */
kaf24@6486 9
kaf24@6486 10 #include <xen/config.h>
kaf24@6486 11 #include <xen/types.h>
kaf24@6486 12 #include <xen/lib.h>
kaf24@6486 13 #include <xen/mm.h>
kaf24@6486 14 #include <xen/perfc.h>
kaf24@6486 15 #include <xen/sched.h>
kaf24@6486 16 #include <xen/event.h>
kaf24@6486 17 #include <xen/shadow.h>
kaf24@8468 18 #include <xen/iocap.h>
kaf24@9068 19 #include <xen/guest_access.h>
kaf24@11219 20 #include <xen/errno.h>
kaf24@6486 21 #include <asm/current.h>
kaf24@6486 22 #include <asm/hardirq.h>
kaf24@6486 23 #include <public/memory.h>
kaf24@6486 24
kaf24@8871 25 /*
kaf24@8871 26 * To allow safe resume of do_memory_op() after preemption, we need to know
kaf24@8871 27 * at what point in the page list to resume. For this purpose I steal the
kaf24@8871 28 * high-order bits of the @cmd parameter, which are otherwise unused and zero.
kaf24@8871 29 */
kaf24@8871 30 #define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */
kaf24@8871 31
kfraser@12374 32 struct memop_args {
kfraser@12374 33 /* INPUT */
kfraser@12374 34 struct domain *domain; /* Domain to be affected. */
kfraser@12374 35 XEN_GUEST_HANDLE(xen_pfn_t) extent_list; /* List of extent base addrs. */
kfraser@12374 36 unsigned int nr_extents; /* Number of extents to allocate or free. */
kfraser@12374 37 unsigned int extent_order; /* Size of each extent. */
kfraser@12374 38 unsigned int memflags; /* Allocation flags. */
kfraser@12374 39
kfraser@12374 40 /* INPUT/OUTPUT */
kfraser@12374 41 unsigned int nr_done; /* Number of extents processed so far. */
kfraser@12374 42 int preempted; /* Was the hypercall preempted? */
kfraser@12374 43 };
kfraser@12374 44
kfraser@12374 45 static unsigned int select_local_cpu(struct domain *d)
kfraser@12374 46 {
kfraser@12374 47 struct vcpu *v = d->vcpu[0];
kfraser@12374 48 return (v ? v->processor : 0);
kfraser@12374 49 }
kfraser@12374 50
kfraser@12374 51 static void increase_reservation(struct memop_args *a)
kaf24@6486 52 {
kaf24@8726 53 struct page_info *page;
kaf24@10314 54 unsigned long i;
kaf24@10314 55 xen_pfn_t mfn;
kfraser@12374 56 struct domain *d = a->domain;
kfraser@12374 57 unsigned int cpu = select_local_cpu(d);
kaf24@6486 58
kfraser@12374 59 if ( !guest_handle_is_null(a->extent_list) &&
kfraser@12374 60 !guest_handle_okay(a->extent_list, a->nr_extents) )
kfraser@12374 61 return;
kaf24@6486 62
kfraser@12374 63 if ( (a->extent_order != 0) &&
kaf24@8468 64 !multipage_allocation_permitted(current->domain) )
kfraser@12374 65 return;
kaf24@6486 66
kfraser@12374 67 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@6486 68 {
kaf24@6486 69 if ( hypercall_preempt_check() )
kaf24@6607 70 {
kfraser@12374 71 a->preempted = 1;
kfraser@12374 72 goto out;
kaf24@6607 73 }
kaf24@6486 74
kfraser@12374 75 page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
kfraser@12374 76 if ( unlikely(page == NULL) )
kaf24@6486 77 {
kaf24@12038 78 gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
kfraser@10418 79 "id=%d memflags=%x (%ld of %d)\n",
kfraser@12374 80 a->extent_order, d->domain_id, a->memflags,
kfraser@12374 81 i, a->nr_extents);
kfraser@12374 82 goto out;
kaf24@6486 83 }
kaf24@6486 84
kaf24@6486 85 /* Inform the domain of the new page's machine address. */
kfraser@12374 86 if ( !guest_handle_is_null(a->extent_list) )
kaf24@8859 87 {
kaf24@8859 88 mfn = page_to_mfn(page);
kfraser@12374 89 if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
kfraser@12374 90 goto out;
kaf24@8859 91 }
kaf24@6486 92 }
kaf24@6486 93
kfraser@12374 94 out:
kfraser@12374 95 a->nr_done = i;
kaf24@6486 96 }
sos22@8688 97
kfraser@12374 98 static void populate_physmap(struct memop_args *a)
kaf24@8673 99 {
kaf24@8726 100 struct page_info *page;
kaf24@10314 101 unsigned long i, j;
kfraser@12374 102 xen_pfn_t gpfn, mfn;
kfraser@12374 103 struct domain *d = a->domain;
kfraser@12374 104 unsigned int cpu = select_local_cpu(d);
kaf24@8673 105
kfraser@12374 106 if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
kfraser@12374 107 return;
kaf24@8673 108
kfraser@12374 109 if ( (a->extent_order != 0) &&
kaf24@8673 110 !multipage_allocation_permitted(current->domain) )
kfraser@12374 111 return;
kaf24@8673 112
kfraser@12374 113 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@8673 114 {
kaf24@8673 115 if ( hypercall_preempt_check() )
kaf24@8673 116 {
kfraser@12374 117 a->preempted = 1;
sos22@8688 118 goto out;
kaf24@8673 119 }
kaf24@8673 120
kfraser@12374 121 if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
kaf24@8859 122 goto out;
kaf24@8859 123
kfraser@12374 124 page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
kfraser@12374 125 if ( unlikely(page == NULL) )
kaf24@8673 126 {
kaf24@12038 127 gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
kfraser@12374 128 "id=%d memflags=%x (%ld of %d)\n",
kfraser@12374 129 a->extent_order, d->domain_id, a->memflags,
kfraser@12374 130 i, a->nr_extents);
sos22@8688 131 goto out;
kaf24@8673 132 }
kaf24@8673 133
kaf24@8726 134 mfn = page_to_mfn(page);
kaf24@8673 135
kaf24@8694 136 if ( unlikely(shadow_mode_translate(d)) )
kaf24@8694 137 {
kfraser@12374 138 for ( j = 0; j < (1 << a->extent_order); j++ )
kaf24@8736 139 guest_physmap_add_page(d, gpfn + j, mfn + j);
sos22@8688 140 }
kaf24@8694 141 else
kaf24@8694 142 {
kfraser@12374 143 for ( j = 0; j < (1 << a->extent_order); j++ )
kaf24@8736 144 set_gpfn_from_mfn(mfn + j, gpfn + j);
kaf24@8673 145
sos22@8688 146 /* Inform the domain of the new page's machine address. */
kfraser@12374 147 if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
sos22@8688 148 goto out;
sos22@8688 149 }
kaf24@8673 150 }
kaf24@8673 151
sos22@8688 152 out:
kfraser@12374 153 a->nr_done = i;
kaf24@8673 154 }
cl349@9211 155
kfraser@12374 156 int guest_remove_page(struct domain *d, unsigned long gmfn)
cl349@9211 157 {
cl349@9211 158 struct page_info *page;
cl349@9211 159 unsigned long mfn;
cl349@9211 160
cl349@9211 161 mfn = gmfn_to_mfn(d, gmfn);
cl349@9211 162 if ( unlikely(!mfn_valid(mfn)) )
cl349@9211 163 {
kaf24@12038 164 gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
tdeegan@11172 165 d->domain_id, gmfn);
cl349@9211 166 return 0;
cl349@9211 167 }
cl349@9211 168
cl349@9211 169 page = mfn_to_page(mfn);
cl349@9211 170 if ( unlikely(!get_page(page, d)) )
cl349@9211 171 {
kaf24@12038 172 gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
cl349@9211 173 return 0;
cl349@9211 174 }
cl349@9211 175
cl349@9211 176 if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
cl349@9211 177 put_page_and_type(page);
cl349@9211 178
cl349@9211 179 if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
cl349@9211 180 put_page(page);
cl349@9211 181
kfraser@10823 182 if ( unlikely(!page_is_removable(page)) )
kfraser@10584 183 {
kfraser@10584 184 /* We'll make this a guest-visible error in future, so take heed! */
kaf24@12038 185 gdprintk(XENLOG_INFO, "Dom%d freeing in-use page %lx (pseudophys %lx):"
kaf24@10744 186 " count=%lx type=%lx\n",
kfraser@10584 187 d->domain_id, mfn, get_gpfn_from_mfn(mfn),
kaf24@10744 188 (unsigned long)page->count_info, page->u.inuse.type_info);
kfraser@10584 189 }
kfraser@10584 190
kfraser@11212 191 guest_physmap_remove_page(d, gmfn, mfn);
cl349@9211 192
cl349@9211 193 put_page(page);
cl349@9211 194
cl349@9211 195 return 1;
cl349@9211 196 }
cl349@9211 197
kfraser@12374 198 static void decrease_reservation(struct memop_args *a)
kaf24@6486 199 {
kaf24@10314 200 unsigned long i, j;
kaf24@10314 201 xen_pfn_t gmfn;
kaf24@6486 202
kfraser@12374 203 if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
kfraser@12374 204 return;
kaf24@6486 205
kfraser@12374 206 for ( i = a->nr_done; i < a->nr_extents; i++ )
kaf24@6486 207 {
kaf24@6486 208 if ( hypercall_preempt_check() )
kaf24@6607 209 {
kfraser@12374 210 a->preempted = 1;
kfraser@12374 211 goto out;
kaf24@6607 212 }
kaf24@6486 213
kfraser@12374 214 if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
kfraser@12374 215 goto out;
kaf24@6486 216
kfraser@12374 217 for ( j = 0; j < (1 << a->extent_order); j++ )
kfraser@12374 218 if ( !guest_remove_page(a->domain, gmfn + j) )
kfraser@12374 219 goto out;
kaf24@6486 220 }
kaf24@6486 221
kfraser@12374 222 out:
kfraser@12374 223 a->nr_done = i;
kaf24@6486 224 }
kaf24@6486 225
kfraser@12374 226 static long translate_gpfn_list(
kaf24@9873 227 XEN_GUEST_HANDLE(xen_translate_gpfn_list_t) uop, unsigned long *progress)
kaf24@8871 228 {
kaf24@8871 229 struct xen_translate_gpfn_list op;
kaf24@10314 230 unsigned long i;
kaf24@10314 231 xen_pfn_t gpfn;
kaf24@10314 232 xen_pfn_t mfn;
kaf24@8871 233 struct domain *d;
kaf24@6486 234
kaf24@9068 235 if ( copy_from_guest(&op, uop, 1) )
kaf24@8871 236 return -EFAULT;
kaf24@8871 237
kaf24@8871 238 /* Is size too large for us to encode a continuation? */
kaf24@8871 239 if ( op.nr_gpfns > (ULONG_MAX >> START_EXTENT_SHIFT) )
kaf24@8871 240 return -EINVAL;
kaf24@8871 241
kaf24@9068 242 if ( !guest_handle_okay(op.gpfn_list, op.nr_gpfns) ||
kaf24@9068 243 !guest_handle_okay(op.mfn_list, op.nr_gpfns) )
kaf24@8871 244 return -EFAULT;
kaf24@8871 245
kaf24@8871 246 if ( op.domid == DOMID_SELF )
kaf24@8871 247 op.domid = current->domain->domain_id;
kaf24@8871 248 else if ( !IS_PRIV(current->domain) )
kaf24@8871 249 return -EPERM;
kaf24@8871 250
kaf24@8871 251 if ( (d = find_domain_by_id(op.domid)) == NULL )
kaf24@8871 252 return -ESRCH;
kaf24@8871 253
kfraser@11212 254 if ( !shadow_mode_translate(d) )
kaf24@8871 255 {
kaf24@8871 256 put_domain(d);
kaf24@8871 257 return -EINVAL;
kaf24@8871 258 }
kaf24@8871 259
kaf24@8871 260 for ( i = *progress; i < op.nr_gpfns; i++ )
kaf24@8871 261 {
kaf24@8871 262 if ( hypercall_preempt_check() )
kaf24@8871 263 {
kaf24@8871 264 put_domain(d);
kaf24@8871 265 *progress = i;
kaf24@8871 266 return -EAGAIN;
kaf24@8871 267 }
kaf24@8871 268
kaf24@9068 269 if ( unlikely(__copy_from_guest_offset(&gpfn, op.gpfn_list, i, 1)) )
kaf24@8871 270 {
kaf24@8871 271 put_domain(d);
kaf24@8871 272 return -EFAULT;
kaf24@8871 273 }
kaf24@8871 274
kaf24@8871 275 mfn = gmfn_to_mfn(d, gpfn);
kaf24@8871 276
kaf24@9068 277 if ( unlikely(__copy_to_guest_offset(op.mfn_list, i, &mfn, 1)) )
kaf24@8871 278 {
kaf24@8871 279 put_domain(d);
kaf24@8871 280 return -EFAULT;
kaf24@8871 281 }
kaf24@8871 282 }
kaf24@8871 283
kaf24@8871 284 put_domain(d);
kaf24@8871 285 return 0;
kaf24@8871 286 }
kaf24@8871 287
kfraser@12374 288 static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
kfraser@10418 289 {
kfraser@10418 290 struct xen_memory_exchange exch;
kfraser@10418 291 LIST_HEAD(in_chunk_list);
kfraser@10418 292 LIST_HEAD(out_chunk_list);
kfraser@10418 293 unsigned long in_chunk_order, out_chunk_order;
kaf24@10459 294 xen_pfn_t gpfn, gmfn, mfn;
kfraser@10418 295 unsigned long i, j, k;
kfraser@11973 296 unsigned int memflags = 0, cpu;
kfraser@10418 297 long rc = 0;
kfraser@10418 298 struct domain *d;
kfraser@10418 299 struct page_info *page;
kfraser@10418 300
kfraser@10418 301 if ( copy_from_guest(&exch, arg, 1) )
kfraser@10418 302 return -EFAULT;
kfraser@10418 303
kfraser@10418 304 /* Various sanity checks. */
kfraser@10418 305 if ( (exch.nr_exchanged > exch.in.nr_extents) ||
kfraser@10418 306 /* Input and output domain identifiers match? */
kfraser@10418 307 (exch.in.domid != exch.out.domid) ||
kfraser@10418 308 /* Sizes of input and output lists do not overflow a long? */
kfraser@10418 309 ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
kfraser@10418 310 ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||
kfraser@10418 311 /* Sizes of input and output lists match? */
kfraser@10418 312 ((exch.in.nr_extents << exch.in.extent_order) !=
kfraser@10418 313 (exch.out.nr_extents << exch.out.extent_order)) )
kfraser@10418 314 {
kfraser@10418 315 rc = -EINVAL;
kfraser@10418 316 goto fail_early;
kfraser@10418 317 }
kfraser@10418 318
kfraser@10418 319 /* Only privileged guests can allocate multi-page contiguous extents. */
kfraser@10418 320 if ( ((exch.in.extent_order != 0) || (exch.out.extent_order != 0)) &&
kfraser@10418 321 !multipage_allocation_permitted(current->domain) )
kfraser@10418 322 {
kfraser@10418 323 rc = -EPERM;
kfraser@10418 324 goto fail_early;
kfraser@10418 325 }
kfraser@10418 326
kfraser@10418 327 if ( (exch.out.address_bits != 0) &&
kfraser@10418 328 (exch.out.address_bits <
kfraser@10418 329 (get_order_from_pages(max_page) + PAGE_SHIFT)) )
kfraser@10418 330 {
kfraser@10418 331 if ( exch.out.address_bits < 31 )
kfraser@10418 332 {
kfraser@10418 333 rc = -ENOMEM;
kfraser@10418 334 goto fail_early;
kfraser@10418 335 }
kfraser@10418 336 memflags = MEMF_dma;
kfraser@10418 337 }
kfraser@10418 338
kfraser@10418 339 if ( exch.in.extent_order <= exch.out.extent_order )
kfraser@10418 340 {
kfraser@10418 341 in_chunk_order = exch.out.extent_order - exch.in.extent_order;
kfraser@10418 342 out_chunk_order = 0;
kfraser@10418 343 }
kfraser@10418 344 else
kfraser@10418 345 {
kfraser@10418 346 in_chunk_order = 0;
kfraser@10418 347 out_chunk_order = exch.in.extent_order - exch.out.extent_order;
kfraser@10418 348 }
kfraser@10418 349
kfraser@10418 350 /*
kfraser@10418 351 * Only support exchange on calling domain right now. Otherwise there are
kfraser@10418 352 * tricky corner cases to consider (e.g., DOMF_dying domain).
kfraser@10418 353 */
kfraser@10418 354 if ( unlikely(exch.in.domid != DOMID_SELF) )
kfraser@10418 355 {
kfraser@10418 356 rc = IS_PRIV(current->domain) ? -EINVAL : -EPERM;
kfraser@10418 357 goto fail_early;
kfraser@10418 358 }
kfraser@10418 359 d = current->domain;
kfraser@10418 360
kfraser@12374 361 cpu = select_local_cpu(d);
kfraser@11973 362
kfraser@12374 363 for ( i = (exch.nr_exchanged >> in_chunk_order);
kfraser@12374 364 i < (exch.in.nr_extents >> in_chunk_order);
kfraser@12374 365 i++ )
kfraser@10418 366 {
kfraser@10418 367 if ( hypercall_preempt_check() )
kfraser@10418 368 {
kfraser@12374 369 exch.nr_exchanged = i << in_chunk_order;
kfraser@10418 370 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 371 return -EFAULT;
kfraser@10418 372 return hypercall_create_continuation(
kfraser@10418 373 __HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg);
kfraser@10418 374 }
kfraser@10418 375
kfraser@10418 376 /* Steal a chunk's worth of input pages from the domain. */
kfraser@10418 377 for ( j = 0; j < (1UL << in_chunk_order); j++ )
kfraser@10418 378 {
kfraser@10418 379 if ( unlikely(__copy_from_guest_offset(
kfraser@10418 380 &gmfn, exch.in.extent_start, (i<<in_chunk_order)+j, 1)) )
kfraser@10418 381 {
kfraser@10418 382 rc = -EFAULT;
kfraser@10418 383 goto fail;
kfraser@10418 384 }
kfraser@10418 385
kfraser@10418 386 for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
kfraser@10418 387 {
kfraser@10418 388 mfn = gmfn_to_mfn(d, gmfn + k);
kfraser@10418 389 if ( unlikely(!mfn_valid(mfn)) )
kfraser@10418 390 {
kfraser@10418 391 rc = -EINVAL;
kfraser@10418 392 goto fail;
kfraser@10418 393 }
kfraser@10418 394
kfraser@10418 395 page = mfn_to_page(mfn);
kfraser@10418 396
kfraser@10418 397 if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
kfraser@10418 398 {
kfraser@10418 399 rc = -EINVAL;
kfraser@10418 400 goto fail;
kfraser@10418 401 }
kfraser@10418 402
kfraser@10418 403 list_add(&page->list, &in_chunk_list);
kfraser@10418 404 }
kfraser@10418 405 }
kfraser@10418 406
kfraser@10418 407 /* Allocate a chunk's worth of anonymous output pages. */
kfraser@10418 408 for ( j = 0; j < (1UL << out_chunk_order); j++ )
kfraser@10418 409 {
kfraser@12374 410 page = __alloc_domheap_pages(
kfraser@12374 411 NULL, cpu, exch.out.extent_order, memflags);
kfraser@10418 412 if ( unlikely(page == NULL) )
kfraser@10418 413 {
kfraser@10418 414 rc = -ENOMEM;
kfraser@10418 415 goto fail;
kfraser@10418 416 }
kfraser@10418 417
kfraser@10418 418 list_add(&page->list, &out_chunk_list);
kfraser@10418 419 }
kfraser@10418 420
kfraser@10418 421 /*
kfraser@10418 422 * Success! Beyond this point we cannot fail for this chunk.
kfraser@10418 423 */
kfraser@10418 424
kfraser@10418 425 /* Destroy final reference to each input page. */
kfraser@10418 426 while ( !list_empty(&in_chunk_list) )
kfraser@10418 427 {
kfraser@10418 428 page = list_entry(in_chunk_list.next, struct page_info, list);
kfraser@10418 429 list_del(&page->list);
kfraser@10418 430 if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
kfraser@10418 431 BUG();
kfraser@10418 432 mfn = page_to_mfn(page);
kfraser@10418 433 guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn);
kfraser@10418 434 put_page(page);
kfraser@10418 435 }
kfraser@10418 436
kfraser@10418 437 /* Assign each output page to the domain. */
kfraser@10418 438 j = 0;
kfraser@10418 439 while ( !list_empty(&out_chunk_list) )
kfraser@10418 440 {
kfraser@10418 441 page = list_entry(out_chunk_list.next, struct page_info, list);
kfraser@10418 442 list_del(&page->list);
kfraser@10418 443 if ( assign_pages(d, page, exch.out.extent_order,
kfraser@10418 444 MEMF_no_refcount) )
kfraser@10418 445 BUG();
kfraser@10418 446
kfraser@10418 447 /* Note that we ignore errors accessing the output extent list. */
kfraser@10418 448 (void)__copy_from_guest_offset(
kfraser@10418 449 &gpfn, exch.out.extent_start, (i<<out_chunk_order)+j, 1);
kfraser@10418 450
kfraser@10418 451 mfn = page_to_mfn(page);
kfraser@10418 452 if ( unlikely(shadow_mode_translate(d)) )
kfraser@10418 453 {
kfraser@10418 454 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
kfraser@10418 455 guest_physmap_add_page(d, gpfn + k, mfn + k);
kfraser@10418 456 }
kfraser@10418 457 else
kfraser@10418 458 {
kfraser@10418 459 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
kfraser@10418 460 set_gpfn_from_mfn(mfn + k, gpfn + k);
kfraser@10418 461 (void)__copy_to_guest_offset(
kfraser@10418 462 exch.out.extent_start, (i<<out_chunk_order)+j, &mfn, 1);
kfraser@10418 463 }
kfraser@10418 464
kfraser@10418 465 j++;
kfraser@10418 466 }
kfraser@10418 467 BUG_ON(j != (1UL << out_chunk_order));
kfraser@10418 468 }
kfraser@10418 469
kfraser@12374 470 exch.nr_exchanged = exch.in.nr_extents;
kfraser@10418 471 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 472 rc = -EFAULT;
kfraser@10418 473 return rc;
kfraser@10418 474
kfraser@10418 475 /*
kfraser@10418 476 * Failed a chunk! Free any partial chunk work. Tell caller how many
kfraser@10418 477 * chunks succeeded.
kfraser@10418 478 */
kfraser@10418 479 fail:
kfraser@10418 480 /* Reassign any input pages we managed to steal. */
kfraser@10418 481 while ( !list_empty(&in_chunk_list) )
kfraser@10418 482 {
kfraser@10418 483 page = list_entry(in_chunk_list.next, struct page_info, list);
kfraser@10418 484 list_del(&page->list);
kfraser@10418 485 if ( assign_pages(d, page, 0, MEMF_no_refcount) )
kfraser@10418 486 BUG();
kfraser@10418 487 }
kfraser@10418 488
kfraser@10418 489 /* Free any output pages we managed to allocate. */
kfraser@10418 490 while ( !list_empty(&out_chunk_list) )
kfraser@10418 491 {
kfraser@10418 492 page = list_entry(out_chunk_list.next, struct page_info, list);
kfraser@10418 493 list_del(&page->list);
kfraser@10418 494 free_domheap_pages(page, exch.out.extent_order);
kfraser@10418 495 }
kfraser@10418 496
kfraser@12374 497 exch.nr_exchanged = i << in_chunk_order;
kfraser@10418 498
kfraser@10418 499 fail_early:
kfraser@10418 500 if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
kfraser@10418 501 rc = -EFAULT;
kfraser@10418 502 return rc;
kfraser@10418 503 }
kfraser@10418 504
kaf24@9873 505 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE(void) arg)
kaf24@6486 506 {
kaf24@6486 507 struct domain *d;
kfraser@12374 508 int rc, op;
kaf24@8871 509 unsigned long start_extent, progress;
kaf24@6486 510 struct xen_memory_reservation reservation;
kfraser@12374 511 struct memop_args args;
kaf24@7959 512 domid_t domid;
kaf24@6486 513
kaf24@6486 514 op = cmd & ((1 << START_EXTENT_SHIFT) - 1);
kaf24@6486 515
kaf24@6486 516 switch ( op )
kaf24@6486 517 {
kaf24@6486 518 case XENMEM_increase_reservation:
kaf24@6486 519 case XENMEM_decrease_reservation:
kaf24@8673 520 case XENMEM_populate_physmap:
kfraser@10418 521 start_extent = cmd >> START_EXTENT_SHIFT;
kfraser@10418 522
kaf24@9068 523 if ( copy_from_guest(&reservation, arg, 1) )
kfraser@10418 524 return start_extent;
kaf24@6486 525
kaf24@8871 526 /* Is size too large for us to encode a continuation? */
kaf24@8871 527 if ( reservation.nr_extents > (ULONG_MAX >> START_EXTENT_SHIFT) )
kfraser@10418 528 return start_extent;
kaf24@8871 529
kaf24@6486 530 if ( unlikely(start_extent > reservation.nr_extents) )
kfraser@10418 531 return start_extent;
kaf24@9068 532
kfraser@12374 533 args.extent_list = reservation.extent_start;
kfraser@12374 534 args.nr_extents = reservation.nr_extents;
kfraser@12374 535 args.extent_order = reservation.extent_order;
kfraser@12374 536 args.nr_done = start_extent;
kfraser@12374 537 args.preempted = 0;
kfraser@12374 538 args.memflags = 0;
kaf24@6486 539
kaf24@6701 540 if ( (reservation.address_bits != 0) &&
kaf24@6702 541 (reservation.address_bits <
kaf24@6702 542 (get_order_from_pages(max_page) + PAGE_SHIFT)) )
kaf24@6486 543 {
kaf24@6486 544 if ( reservation.address_bits < 31 )
kfraser@10418 545 return start_extent;
kfraser@12374 546 args.memflags = MEMF_dma;
kaf24@6486 547 }
kaf24@6486 548
kaf24@6486 549 if ( likely(reservation.domid == DOMID_SELF) )
kaf24@6486 550 d = current->domain;
kfraser@10418 551 else if ( !IS_PRIV(current->domain) ||
kfraser@10418 552 ((d = find_domain_by_id(reservation.domid)) == NULL) )
kfraser@10418 553 return start_extent;
kfraser@12374 554 args.domain = d;
kaf24@6486 555
kaf24@8673 556 switch ( op )
kaf24@8673 557 {
kaf24@8673 558 case XENMEM_increase_reservation:
kfraser@12374 559 increase_reservation(&args);
kaf24@8673 560 break;
kaf24@8673 561 case XENMEM_decrease_reservation:
kfraser@12374 562 decrease_reservation(&args);
kaf24@8673 563 break;
kfraser@12374 564 default: /* XENMEM_populate_physmap */
kfraser@12374 565 populate_physmap(&args);
kaf24@8673 566 break;
kaf24@8673 567 }
kaf24@6486 568
kaf24@6486 569 if ( unlikely(reservation.domid != DOMID_SELF) )
kaf24@6486 570 put_domain(d);
kaf24@6486 571
kfraser@12374 572 rc = args.nr_done;
kaf24@6486 573
kfraser@12374 574 if ( args.preempted )
kaf24@9068 575 return hypercall_create_continuation(
kaf24@9068 576 __HYPERVISOR_memory_op, "lh",
kaf24@9068 577 op | (rc << START_EXTENT_SHIFT), arg);
kaf24@6607 578
kaf24@6486 579 break;
kaf24@6486 580
kfraser@10418 581 case XENMEM_exchange:
kfraser@10418 582 rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
kfraser@10418 583 break;
kfraser@10418 584
kaf24@6486 585 case XENMEM_maximum_ram_page:
kaf24@7959 586 rc = max_page;
kaf24@7959 587 break;
kaf24@7959 588
kaf24@7959 589 case XENMEM_current_reservation:
kaf24@7959 590 case XENMEM_maximum_reservation:
kaf24@9068 591 if ( copy_from_guest(&domid, arg, 1) )
kaf24@6486 592 return -EFAULT;
kaf24@7959 593
kaf24@9068 594 if ( likely(domid == DOMID_SELF) )
kaf24@7959 595 d = current->domain;
kaf24@7959 596 else if ( !IS_PRIV(current->domain) )
kaf24@7959 597 return -EPERM;
kaf24@7959 598 else if ( (d = find_domain_by_id(domid)) == NULL )
kaf24@7959 599 return -ESRCH;
kaf24@7959 600
kaf24@7959 601 rc = (op == XENMEM_current_reservation) ? d->tot_pages : d->max_pages;
kaf24@7959 602
kaf24@7959 603 if ( unlikely(domid != DOMID_SELF) )
kaf24@7959 604 put_domain(d);
kaf24@7959 605
kaf24@6486 606 break;
kaf24@6486 607
kaf24@8871 608 case XENMEM_translate_gpfn_list:
kaf24@8871 609 progress = cmd >> START_EXTENT_SHIFT;
kaf24@9068 610 rc = translate_gpfn_list(
kaf24@9068 611 guest_handle_cast(arg, xen_translate_gpfn_list_t),
kaf24@9068 612 &progress);
kaf24@8871 613 if ( rc == -EAGAIN )
kaf24@9068 614 return hypercall_create_continuation(
kaf24@9068 615 __HYPERVISOR_memory_op, "lh",
kaf24@9068 616 op | (progress << START_EXTENT_SHIFT), arg);
kaf24@8871 617 break;
kaf24@8871 618
kaf24@6486 619 default:
kaf24@8059 620 rc = arch_memory_op(op, arg);
kaf24@6486 621 break;
kaf24@6486 622 }
kaf24@6486 623
kaf24@6486 624 return rc;
kaf24@6486 625 }
kaf24@6486 626
kaf24@6486 627 /*
kaf24@6486 628 * Local variables:
kaf24@6486 629 * mode: C
kaf24@6486 630 * c-set-style: "BSD"
kaf24@6486 631 * c-basic-offset: 4
kaf24@6486 632 * tab-width: 4
kaf24@6486 633 * indent-tabs-mode: nil
kaf24@6486 634 * End:
kaf24@6486 635 */