direct-io.hg

view xen/common/grant_table.c @ 10734:9b7e1ea4c4d2

[HVM] Sync p2m table across all vcpus on x86_32p xen.
We found that VGA acceleration cannot work on SMP VMX guests on x86_32p
xen. This is caused by the way we construct the p2m table today: only the 1st
l2 page table slot that maps p2m table pages is copied to the non-vcpu0 vcpu
monitor page table when the VMX guest is created. But VGA acceleration will
create some p2m table entries beyond the 1st l2 page table slot after the HVM guest
is created, so only vcpu0 can see these p2m entries, and the other vcpus
cannot do VGA acceleration.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kfraser@localhost.localdomain
date Wed Jul 26 11:34:12 2006 +0100 (2006-07-26)
parents 2937703f0ed0
children 247fc1245b21
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/lib.h>
28 #include <xen/sched.h>
29 #include <xen/shadow.h>
30 #include <xen/mm.h>
31 #include <xen/trace.h>
32 #include <xen/guest_access.h>
33 #include <acm/acm_hooks.h>
/*
 * Report a grant pinning failure: log a diagnostic, set the caller's
 * local variable 'rc' to _rc, and jump to label _lbl.  NB. relies on
 * an 'rc' variable and the target label existing in the enclosing
 * function.
 */
#define PIN_FAIL(_lbl, _rc, _f, _a...)          \
    do {                                        \
        DPRINTK( _f, ## _a );                   \
        rc = (_rc);                             \
        goto _lbl;                              \
    } while ( 0 )
42 static inline int
43 get_maptrack_handle(
44 struct grant_table *t)
45 {
46 unsigned int h;
47 if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
48 return -1;
49 t->maptrack_head = t->maptrack[h].ref;
50 t->map_count++;
51 return h;
52 }
54 static inline void
55 put_maptrack_handle(
56 struct grant_table *t, int handle)
57 {
58 t->maptrack[handle].ref = t->maptrack_head;
59 t->maptrack_head = handle;
60 t->map_count--;
61 }
/*
 * Returns 0 if TLB flush / invalidate required by caller.
 * va will indicate the address to be invalidated.
 *
 * addr is _either_ a host virtual address, or the address of the pte to
 * update, as indicated by the GNTMAP_contains_pte flag.
 */
static void
__gnttab_map_grant_ref(
    struct gnttab_map_grant_ref *op)
{
    struct domain *ld, *rd;
    struct vcpu   *led;
    int            handle;
    unsigned long  frame = 0;
    int            rc = GNTST_okay;
    struct active_grant_entry *act;

    /* Entry details from @rd's shared grant table. */
    grant_entry_t *sha;
    domid_t        sdom;
    u16            sflags;

    /*
     * We bound the number of times we retry CMPXCHG on memory locations that
     * we share with a guest OS. The reason is that the guest can modify that
     * location at a higher rate than we can read-modify-CMPXCHG, so the guest
     * could cause us to livelock. There are a few cases where it is valid for
     * the guest to race our updates (e.g., to change the GTF_readonly flag),
     * so we allow a few retries before failing.
     */
    int retries = 0;

    led = current;
    ld = led->domain;

    /* Sanity-check the request: ref in range, at least one mapping kind. */
    if ( unlikely(op->ref >= NR_GRANT_ENTRIES) ||
         unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
    {
        DPRINTK("Bad ref (%d) or flags (%x).\n", op->ref, op->flags);
        op->status = GNTST_bad_gntref;
        return;
    }

    /* Mandatory access-control hook. */
    if ( acm_pre_grant_map_ref(op->dom) )
    {
        op->status = GNTST_permission_denied;
        return;
    }

    if ( unlikely((rd = find_domain_by_id(op->dom)) == NULL) )
    {
        /* NOTE(review): rd is NULL inside this branch, so the put_domain()
         * below is dead code. */
        if ( rd != NULL )
            put_domain(rd);
        DPRINTK("Could not find domain %d\n", op->dom);
        op->status = GNTST_bad_domain;
        return;
    }

    /* Get a maptrack handle. */
    if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
    {
        int                   i;
        struct grant_mapping *new_mt;
        struct grant_table   *lgt = ld->grant_table;

        if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
        {
            put_domain(rd);
            DPRINTK("Maptrack table is at maximum size.\n");
            op->status = GNTST_no_device_space;
            return;
        }

        /* Grow the maptrack table. */
        new_mt = alloc_xenheap_pages(lgt->maptrack_order + 1);
        if ( new_mt == NULL )
        {
            put_domain(rd);
            DPRINTK("No more map handles available.\n");
            op->status = GNTST_no_device_space;
            return;
        }

        /* Copy the old half across, then thread the new second half onto
         * the free list via the 'ref' link field. */
        memcpy(new_mt, lgt->maptrack, PAGE_SIZE << lgt->maptrack_order);
        for ( i = lgt->maptrack_limit; i < (lgt->maptrack_limit << 1); i++ )
            new_mt[i].ref = i+1;

        free_xenheap_pages(lgt->maptrack, lgt->maptrack_order);
        lgt->maptrack = new_mt;
        lgt->maptrack_order += 1;
        lgt->maptrack_limit <<= 1;

        DPRINTK("Doubled maptrack size\n");
        /* NOTE(review): this retried allocation is not checked for -1.
         * It presumably cannot fail right after doubling — worth an
         * ASSERT to make that invariant explicit. */
        handle = get_maptrack_handle(ld->grant_table);
    }

    act = &rd->grant_table->active[op->ref];
    sha = &rd->grant_table->shared[op->ref];

    spin_lock(&rd->grant_table->lock);

    /* If the entry is not pinned yet (or is pinned read-only and we now
     * need write access), validate and update the shared entry flags. */
    if ( !act->pin ||
         (!(op->flags & GNTMAP_readonly) &&
          !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
    {
        sflags = sha->flags;
        sdom   = sha->domid;

        /*
         * This loop attempts to set the access (reading/writing) flags
         * in the grant table entry.  It tries a cmpxchg on the field
         * up to five times, and then fails under the assumption that
         * the guest is misbehaving.
         */
        for ( ; ; )
        {
            u32 scombo, prev_scombo, new_scombo;

            if ( unlikely((sflags & GTF_type_mask) != GTF_permit_access) ||
                 unlikely(sdom != led->domain->domain_id) )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Bad flags (%x) or dom (%d). (NB. expected dom %d)\n",
                         sflags, sdom, led->domain->domain_id);

            /* Merge two 16-bit values into a 32-bit combined update. */
            /* NB. Endianness! */
            scombo = ((u32)sdom << 16) | (u32)sflags;

            new_scombo = scombo | GTF_reading;
            if ( !(op->flags & GNTMAP_readonly) )
            {
                new_scombo |= GTF_writing;
                if ( unlikely(sflags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo = cmpxchg((u32 *)&sha->flags, scombo, new_scombo);

            /* Did the combined update work (did we see what we expected?). */
            if ( likely(prev_scombo == scombo) )
                break;

            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            /* Didn't see what we expected. Split out the seen flags & dom. */
            /* NB. Endianness! */
            sflags = (u16)prev_scombo;
            sdom   = (u16)(prev_scombo >> 16);
        }

        /* First pin of this entry: record granting domid and frame. */
        if ( !act->pin )
        {
            act->domid = sdom;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }
    else if ( (act->pin & 0x80808080U) != 0 )
        /* Top bit set in any of the four 8-bit pin counters: refuse to
         * risk a counter overflow.
         * NOTE(review): ENOSPC is a positive errno value, inconsistent
         * with the negative GNTST_* codes used on every other path of
         * this function. */
        PIN_FAIL(unlock_out, ENOSPC,
                 "Risk of counter overflow %08x\n", act->pin);

    /* Account the new pin(s) before dropping the lock. */
    if ( op->flags & GNTMAP_device_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    spin_unlock(&rd->grant_table->lock);

    /* Take page reference(s) on the granted frame; failures roll back
     * the pin accounting via undo_out. */
    frame = act->frame;
    if ( unlikely(!mfn_valid(frame)) ||
         unlikely(!((op->flags & GNTMAP_readonly) ?
                    get_page(mfn_to_page(frame), rd) :
                    get_page_and_type(mfn_to_page(frame), rd,
                                      PGT_writable_page))) )
        PIN_FAIL(undo_out, GNTST_general_error,
                 "Could not pin the granted frame (%lx)!\n", frame);

    if ( op->flags & GNTMAP_host_map )
    {
        rc = create_grant_host_mapping(op->host_addr, frame, op->flags);
        if ( rc != GNTST_okay )
        {
            /* Drop the reference(s) taken just above, then roll back. */
            if ( !(op->flags & GNTMAP_readonly) )
                put_page_type(mfn_to_page(frame));
            put_page(mfn_to_page(frame));
            goto undo_out;
        }

        if ( op->flags & GNTMAP_device_map )
        {
            /* A simultaneous device mapping carries its own reference(s). */
            (void)get_page(mfn_to_page(frame), rd);
            if ( !(op->flags & GNTMAP_readonly) )
                get_page_type(mfn_to_page(frame), PGT_writable_page);
        }
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);

    /* Record the mapping so it can be torn down later via the handle. */
    ld->grant_table->maptrack[handle].domid = op->dom;
    ld->grant_table->maptrack[handle].ref   = op->ref;
    ld->grant_table->maptrack[handle].flags = op->flags;

    op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
    op->handle       = handle;
    op->status       = GNTST_okay;

    put_domain(rd);
    return;

 undo_out:
    /* Roll back the pin counts and shared-entry access bits set above. */
    spin_lock(&rd->grant_table->lock);

    if ( op->flags & GNTMAP_device_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    if ( !(op->flags & GNTMAP_readonly) &&
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    op->status = rc;
    put_maptrack_handle(ld->grant_table, handle);
    put_domain(rd);
}
301 static long
302 gnttab_map_grant_ref(
303 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
304 {
305 int i;
306 struct gnttab_map_grant_ref op;
308 for ( i = 0; i < count; i++ )
309 {
310 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
311 return -EFAULT;
312 __gnttab_map_grant_ref(&op);
313 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
314 return -EFAULT;
315 }
317 return 0;
318 }
/*
 * Tear down a single grant mapping identified by op->handle.
 * op->dev_bus_addr selects device-mapping teardown (non-zero, must match
 * the granted frame); op->host_addr selects host-mapping teardown.
 * Result is reported in op->status.
 */
static void
__gnttab_unmap_grant_ref(
    struct gnttab_unmap_grant_ref *op)
{
    domid_t          dom;
    grant_ref_t      ref;
    struct domain   *ld, *rd;
    struct active_grant_entry *act;
    grant_entry_t   *sha;
    struct grant_mapping *map;
    u16              flags;
    s16              rc = 0;
    unsigned long    frame;

    ld = current->domain;

    frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);

    /* NB. only the address is formed here; map->flags is dereferenced
     * below only after the short-circuited bounds check passes. */
    map = &ld->grant_table->maptrack[op->handle];

    if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) ||
         unlikely(!map->flags) )
    {
        DPRINTK("Bad handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    dom   = map->domid;
    ref   = map->ref;
    flags = map->flags;

    if ( unlikely((rd = find_domain_by_id(dom)) == NULL) )
    {
        /* NOTE(review): rd is NULL inside this branch, so the put_domain()
         * below is dead code. */
        if ( rd != NULL )
            put_domain(rd);
        DPRINTK("Could not find domain %d\n", dom);
        op->status = GNTST_bad_domain;
        return;
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);

    act = &rd->grant_table->active[ref];
    sha = &rd->grant_table->shared[ref];

    spin_lock(&rd->grant_table->lock);

    /* dev_bus_addr == 0: no device unmap requested; adopt the recorded
     * frame.  Otherwise the supplied frame must match the grant, and the
     * device mapping (if any) is released here. */
    if ( frame == 0 )
    {
        frame = act->frame;
    }
    else
    {
        if ( unlikely(frame != act->frame) )
            PIN_FAIL(unmap_out, GNTST_general_error,
                     "Bad frame number doesn't match gntref.\n");
        if ( flags & GNTMAP_device_map )
        {
            ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
            map->flags &= ~GNTMAP_device_map;
            if ( flags & GNTMAP_readonly )
            {
                act->pin -= GNTPIN_devr_inc;
                put_page(mfn_to_page(frame));
            }
            else
            {
                act->pin -= GNTPIN_devw_inc;
                put_page_and_type(mfn_to_page(frame));
            }
        }
    }

    if ( (op->host_addr != 0) && (flags & GNTMAP_host_map) )
    {
        if ( (rc = destroy_grant_host_mapping(op->host_addr,
                                              frame, flags)) < 0 )
            goto unmap_out;

        ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
        map->flags &= ~GNTMAP_host_map;
        if ( flags & GNTMAP_readonly )
        {
            act->pin -= GNTPIN_hstr_inc;
            put_page(mfn_to_page(frame));
        }
        else
        {
            act->pin -= GNTPIN_hstw_inc;
            put_page_and_type(mfn_to_page(frame));
        }
    }

    /* Both mapping kinds gone: recycle the maptrack handle. */
    if ( (map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
    {
        map->flags = 0;
        put_maptrack_handle(ld->grant_table, op->handle);
    }

    /* If just unmapped a writable mapping, mark as dirtied */
    if ( !(flags & GNTMAP_readonly) )
        gnttab_log_dirty(rd, frame);

    /* Clear shared-entry access bits once the last pin of each kind
     * has been dropped. */
    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
         !(flags & GNTMAP_readonly) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( act->pin == 0 )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unmap_out:
    op->status = rc;
    spin_unlock(&rd->grant_table->lock);
    put_domain(rd);
}
437 static long
438 gnttab_unmap_grant_ref(
439 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
440 {
441 int i;
442 struct gnttab_unmap_grant_ref op;
444 for ( i = 0; i < count; i++ )
445 {
446 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
447 goto fault;
448 __gnttab_unmap_grant_ref(&op);
449 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
450 goto fault;
451 }
453 flush_tlb_mask(current->domain->domain_dirty_cpumask);
454 return 0;
456 fault:
457 flush_tlb_mask(current->domain->domain_dirty_cpumask);
458 return -EFAULT;
459 }
/*
 * GNTTABOP_setup_table: report the guest frame numbers of a domain's
 * shared grant-table pages back to the caller.  Operation-level errors
 * are returned in op.status; only guest-copy faults yield -EFAULT.
 */
static long
gnttab_setup_table(
    XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
{
    struct gnttab_setup_table op;
    struct domain *d;
    int            i;
    unsigned long  gmfn;
    domid_t        dom;

    /* setup_table is not a batchable operation. */
    if ( count != 1 )
        return -EINVAL;

    if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
    {
        DPRINTK("Fault while reading gnttab_setup_table_t.\n");
        return -EFAULT;
    }

    if ( unlikely(op.nr_frames > NR_GRANT_FRAMES) )
    {
        DPRINTK("Xen only supports up to %d grant-table frames per domain.\n",
                NR_GRANT_FRAMES);
        op.status = GNTST_general_error;
        goto out;
    }

    /* Only a privileged domain may query another domain's table. */
    dom = op.dom;
    if ( dom == DOMID_SELF )
    {
        dom = current->domain->domain_id;
    }
    else if ( unlikely(!IS_PRIV(current->domain)) )
    {
        op.status = GNTST_permission_denied;
        goto out;
    }

    if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
    {
        DPRINTK("Bad domid %d.\n", dom);
        op.status = GNTST_bad_domain;
        goto out;
    }

    ASSERT(d->grant_table != NULL);
    op.status = GNTST_okay;
    /* Hand back the guest frame number of each shared-table page.
     * NOTE(review): per-frame copy failures are deliberately ignored;
     * the final copy_to_guest below is the only fault check. */
    for ( i = 0; i < op.nr_frames; i++ )
    {
        gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
        (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
    }

    put_domain(d);

 out:
    if ( unlikely(copy_to_guest(uop, &op, 1)) )
        return -EFAULT;

    return 0;
}
/*
 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
 * ownership of a page frame. If so, lock down the grant entry.
 * Returns 1 on success (entry now GTF_transfer_committed), 0 otherwise.
 */
static int
gnttab_prepare_for_transfer(
    struct domain *rd, struct domain *ld, grant_ref_t ref)
{
    struct grant_table *rgt;
    struct grant_entry *sha;
    domid_t             sdom;
    u16                 sflags;
    u32                 scombo, prev_scombo;
    int                 retries = 0;

    if ( unlikely((rgt = rd->grant_table) == NULL) ||
         unlikely(ref >= NR_GRANT_ENTRIES) )
    {
        DPRINTK("Dom %d has no g.t., or ref is bad (%d).\n",
                rd->domain_id, ref);
        return 0;
    }

    spin_lock(&rgt->lock);

    sha = &rgt->shared[ref];

    sflags = sha->flags;
    sdom   = sha->domid;

    /* Bounded-retry cmpxchg loop: atomically move the guest-visible
     * entry from GTF_accept_transfer to transfer-committed, racing
     * possible concurrent guest updates (cf. the retry rationale in
     * __gnttab_map_grant_ref). */
    for ( ; ; )
    {
        if ( unlikely(sflags != GTF_accept_transfer) ||
             unlikely(sdom != ld->domain_id) )
        {
            DPRINTK("Bad flags (%x) or dom (%d). (NB. expected dom %d)\n",
                    sflags, sdom, ld->domain_id);
            goto fail;
        }

        /* Merge two 16-bit values into a 32-bit combined update. */
        /* NB. Endianness! */
        scombo = ((u32)sdom << 16) | (u32)sflags;

        prev_scombo = cmpxchg((u32 *)&sha->flags, scombo,
                              scombo | GTF_transfer_committed);

        /* Did the combined update work (did we see what we expected?). */
        if ( likely(prev_scombo == scombo) )
            break;

        if ( retries++ == 4 )
        {
            DPRINTK("Shared grant entry is unstable.\n");
            goto fail;
        }

        /* Didn't see what we expected. Split out the seen flags & dom. */
        /* NB. Endianness! */
        sflags = (u16)prev_scombo;
        sdom   = (u16)(prev_scombo >> 16);
    }

    spin_unlock(&rgt->lock);
    return 1;

 fail:
    spin_unlock(&rgt->lock);
    return 0;
}
/*
 * GNTTABOP_transfer: move ownership of caller-owned page frames to the
 * domains named in each request, completing the corresponding grant
 * entries.  Per-request errors go in gop.status; -EFAULT only on guest
 * copy faults.
 */
static long
gnttab_transfer(
    XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
{
    struct domain *d = current->domain;
    struct domain *e;
    struct page_info *page;
    int i;
    grant_entry_t *sha;
    struct gnttab_transfer gop;
    unsigned long mfn;

    for ( i = 0; i < count; i++ )
    {
        /* Read from caller address space. */
        if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
        {
            DPRINTK("gnttab_transfer: error reading req %d/%d\n", i, count);
            return -EFAULT;
        }

        mfn = gmfn_to_mfn(d, gop.mfn);

        /* Check the passed page frame for basic validity. */
        if ( unlikely(!mfn_valid(mfn)) )
        {
            DPRINTK("gnttab_transfer: out-of-range %lx\n",
                    (unsigned long)gop.mfn);
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Xen-heap frames must never change ownership. */
        page = mfn_to_page(mfn);
        if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
        {
            DPRINTK("gnttab_transfer: xen frame %lx\n",
                    (unsigned long)gop.mfn);
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Detach the page from the current owner 'd'. */
        if ( steal_page(d, page, 0) < 0 )
        {
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Find the target domain. */
        if ( unlikely((e = find_domain_by_id(gop.domid)) == NULL) )
        {
            DPRINTK("gnttab_transfer: can't find domain %d\n", gop.domid);
            /* Undo the steal: release the now-ownerless page. */
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            gop.status = GNTST_bad_domain;
            goto copyback;
        }

        spin_lock(&e->page_alloc_lock);

        /*
         * Check that 'e' will accept the page and has reservation
         * headroom.  Also, a domain mustn't have PGC_allocated
         * pages when it is dying.
         */
        if ( unlikely(test_bit(_DOMF_dying, &e->domain_flags)) ||
             unlikely(e->tot_pages >= e->max_pages) ||
             unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
        {
            if ( !test_bit(_DOMF_dying, &e->domain_flags) )
                DPRINTK("gnttab_transfer: Transferee has no reservation "
                        "headroom (%d,%d) or provided a bad grant ref (%08x) "
                        "or is dying (%lx)\n",
                        e->tot_pages, e->max_pages, gop.ref, e->domain_flags);
            spin_unlock(&e->page_alloc_lock);
            put_domain(e);
            /* Undo the steal: release the now-ownerless page. */
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            gop.status = GNTST_general_error;
            goto copyback;
        }

        /* Okay, add the page to 'e'. */
        if ( unlikely(e->tot_pages++ == 0) )
            get_knownalive_domain(e);
        list_add_tail(&page->list, &e->page_list);
        page_set_owner(page, e);

        spin_unlock(&e->page_alloc_lock);

        TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);

        /* Tell the guest about its new page frame. */
        sha = &e->grant_table->shared[gop.ref];
        guest_physmap_add_page(e, sha->frame, mfn);
        sha->frame = mfn;
        wmb(); /* new frame must be visible before the completed flag */
        sha->flags |= GTF_transfer_completed;

        put_domain(e);

        gop.status = GNTST_okay;

    copyback:
        if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
        {
            DPRINTK("gnttab_transfer: error writing resp %d/%d\n", i, count);
            return -EFAULT;
        }
    }

    return 0;
}
/*
 * Grant-table hypercall entry point: validate the guest handle, then
 * dispatch to the per-subcommand batch handler.  Runs under the
 * domain's big lock.
 */
long
do_grant_table_op(
    unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
{
    long rc;
    struct domain *d = current->domain;

    /* Bound the batch size to limit time spent in a single hypercall. */
    if ( count > 512 )
        return -EINVAL;

    LOCK_BIGLOCK(d);

    sync_pagetable_state(d);

    rc = -EFAULT;
    switch ( cmd )
    {
    case GNTTABOP_map_grant_ref:
    {
        XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
            guest_handle_cast(uop, gnttab_map_grant_ref_t);
        if ( unlikely(!guest_handle_okay(map, count)) )
            goto out;
        rc = gnttab_map_grant_ref(map, count);
        break;
    }
    case GNTTABOP_unmap_grant_ref:
    {
        XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
            guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
        if ( unlikely(!guest_handle_okay(unmap, count)) )
            goto out;
        rc = gnttab_unmap_grant_ref(unmap, count);
        break;
    }
    case GNTTABOP_setup_table:
    {
        /* setup_table validates its own (count == 1) argument. */
        rc = gnttab_setup_table(
            guest_handle_cast(uop, gnttab_setup_table_t), count);
        break;
    }
    case GNTTABOP_transfer:
    {
        XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
            guest_handle_cast(uop, gnttab_transfer_t);
        if ( unlikely(!guest_handle_okay(transfer, count)) )
            goto out;
        rc = gnttab_transfer(transfer, count);
        break;
    }
    default:
        rc = -ENOSYS;
        break;
    }

 out:
    UNLOCK_BIGLOCK(d);

    return rc;
}
/*
 * Allocate and initialise a domain's grant table: active-entry array,
 * maptrack free list, and guest-shared frames.  Returns 0 on success,
 * -ENOMEM on any allocation failure (partial state is torn down).
 */
int
grant_table_create(
    struct domain *d)
{
    struct grant_table *t;
    int                 i;

    BUG_ON(MAPTRACK_MAX_ENTRIES < NR_GRANT_ENTRIES);
    if ( (t = xmalloc(struct grant_table)) == NULL )
        goto no_mem;

    /* Simple stuff. */
    memset(t, 0, sizeof(*t));
    spin_lock_init(&t->lock);

    /* Active grant table. */
    t->active = xmalloc_array(struct active_grant_entry, NR_GRANT_ENTRIES);
    if ( t->active == NULL )
        goto no_mem;
    memset(t->active, 0, sizeof(struct active_grant_entry) * NR_GRANT_ENTRIES);

    /* Tracking of mapped foreign frames table */
    if ( (t->maptrack = alloc_xenheap_page()) == NULL )
        goto no_mem;
    t->maptrack_order = 0;
    t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
    memset(t->maptrack, 0, PAGE_SIZE);
    /* Thread every maptrack entry onto the free list via 'ref'. */
    for ( i = 0; i < t->maptrack_limit; i++ )
        t->maptrack[i].ref = i+1;

    /* Shared grant table. */
    t->shared = alloc_xenheap_pages(ORDER_GRANT_FRAMES);
    if ( t->shared == NULL )
        goto no_mem;
    memset(t->shared, 0, NR_GRANT_FRAMES * PAGE_SIZE);

    for ( i = 0; i < NR_GRANT_FRAMES; i++ )
        gnttab_create_shared_page(d, t, i);

    /* Okay, install the structure. */
    wmb(); /* avoid races with lock-free access to d->grant_table */
    d->grant_table = t;
    return 0;

 no_mem:
    /* Partial teardown: members not yet allocated are still zero from
     * the memset above; assumes xfree/free_xenheap_page tolerate NULL —
     * TODO confirm against their definitions. */
    if ( t != NULL )
    {
        xfree(t->active);
        free_xenheap_page(t->maptrack);
        xfree(t);
    }
    return -ENOMEM;
}
/*
 * Tear down every live grant mapping held by a dying domain 'd',
 * reversing the pin accounting performed by __gnttab_map_grant_ref.
 * References backing host mappings are dropped implicitly when the
 * domain's page tables are destroyed, so only device-mapping
 * references are released here.
 */
void
gnttab_release_mappings(
    struct domain *d)
{
    struct grant_table   *gt = d->grant_table;
    struct grant_mapping *map;
    grant_ref_t           ref;
    grant_handle_t        handle;
    struct domain        *rd;
    struct active_grant_entry *act;
    struct grant_entry   *sha;

    /* Only valid while the domain is being torn down. */
    BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags));

    for ( handle = 0; handle < gt->maptrack_limit; handle++ )
    {
        map = &gt->maptrack[handle];
        if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
            continue;

        ref = map->ref;

        DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
                handle, ref, map->flags, map->domid);

        rd = find_domain_by_id(map->domid);
        BUG_ON(rd == NULL);

        spin_lock(&rd->grant_table->lock);

        act = &rd->grant_table->active[ref];
        sha = &rd->grant_table->shared[ref];

        if ( map->flags & GNTMAP_readonly )
        {
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devr_mask));
                act->pin -= GNTPIN_devr_inc;
                put_page(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstr_mask));
                act->pin -= GNTPIN_hstr_inc;
                /* Done implicitly when page tables are destroyed. */
                /* put_page(mfn_to_page(act->frame)); */
            }
        }
        else
        {
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devw_mask));
                act->pin -= GNTPIN_devw_inc;
                put_page_and_type(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstw_mask));
                act->pin -= GNTPIN_hstw_inc;
                /* Done implicitly when page tables are destroyed. */
                /* put_page_and_type(mfn_to_page(act->frame)); */
            }

            /* Last writable pin gone: clear the shared writing bit. */
            if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
                gnttab_clear_flag(_GTF_writing, &sha->flags);
        }

        if ( act->pin == 0 )
            gnttab_clear_flag(_GTF_reading, &sha->flags);

        spin_unlock(&rd->grant_table->lock);

        put_domain(rd);

        map->flags = 0;
    }
}
905 void
906 grant_table_destroy(
907 struct domain *d)
908 {
909 struct grant_table *t = d->grant_table;
911 if ( t == NULL )
912 return;
914 free_xenheap_pages(t->shared, ORDER_GRANT_FRAMES);
915 free_xenheap_page(t->maptrack);
916 xfree(t->active);
917 xfree(t);
919 d->grant_table = NULL;
920 }
922 /*
923 * Local variables:
924 * mode: C
925 * c-set-style: "BSD"
926 * c-basic-offset: 4
927 * tab-width: 4
928 * indent-tabs-mode: nil
929 * End:
930 */