ia64/xen-unstable

view xen/common/grant_table.c @ 10614:86cae321e707

Use explicit accessors to handle unusually-sized atomic operations in grant table code.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Jun 30 10:10:39 2006 +0100 (2006-06-30)
parents ee3d10828937
children 2937703f0ed0
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/lib.h>
28 #include <xen/sched.h>
29 #include <xen/shadow.h>
30 #include <xen/mm.h>
31 #include <xen/trace.h>
32 #include <xen/guest_access.h>
33 #include <acm/acm_hooks.h>
/*
 * Log a diagnostic, record an error code, and bail out of the current
 * operation.  Expands to: DPRINTK(_f, _a...); rc = (_rc); goto _lbl;
 * Requires a local variable named 'rc' and the label '_lbl' in the
 * enclosing function.
 */
#define PIN_FAIL(_lbl, _rc, _f, _a...) \
    do { \
        DPRINTK( _f, ## _a ); \
        rc = (_rc); \
        goto _lbl; \
    } while ( 0 )
42 static inline int
43 get_maptrack_handle(
44 struct grant_table *t)
45 {
46 unsigned int h;
47 if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
48 return -1;
49 t->maptrack_head = t->maptrack[h].ref;
50 t->map_count++;
51 return h;
52 }
54 static inline void
55 put_maptrack_handle(
56 struct grant_table *t, int handle)
57 {
58 t->maptrack[handle].ref = t->maptrack_head;
59 t->maptrack_head = handle;
60 t->map_count--;
61 }
63 /*
64 * Returns 0 if TLB flush / invalidate required by caller.
65 * va will indicate the address to be invalidated.
66 *
67 * addr is _either_ a host virtual address, or the address of the pte to
68 * update, as indicated by the GNTMAP_contains_pte flag.
69 */
70 static void
71 __gnttab_map_grant_ref(
72 struct gnttab_map_grant_ref *op)
73 {
74 struct domain *ld, *rd;
75 struct vcpu *led;
76 int handle;
77 unsigned long frame = 0;
78 int rc = GNTST_okay;
79 struct active_grant_entry *act;
81 /* Entry details from @rd's shared grant table. */
82 grant_entry_t *sha;
83 domid_t sdom;
84 u16 sflags;
86 /*
87 * We bound the number of times we retry CMPXCHG on memory locations that
88 * we share with a guest OS. The reason is that the guest can modify that
89 * location at a higher rate than we can read-modify-CMPXCHG, so the guest
90 * could cause us to livelock. There are a few cases where it is valid for
91 * the guest to race our updates (e.g., to change the GTF_readonly flag),
92 * so we allow a few retries before failing.
93 */
94 int retries = 0;
96 led = current;
97 ld = led->domain;
99 if ( unlikely(op->ref >= NR_GRANT_ENTRIES) ||
100 unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
101 {
102 DPRINTK("Bad ref (%d) or flags (%x).\n", op->ref, op->flags);
103 op->status = GNTST_bad_gntref;
104 return;
105 }
107 if ( acm_pre_grant_map_ref(op->dom) )
108 {
109 op->status = GNTST_permission_denied;
110 return;
111 }
113 if ( unlikely((rd = find_domain_by_id(op->dom)) == NULL) ||
114 unlikely(ld == rd) )
115 {
116 if ( rd != NULL )
117 put_domain(rd);
118 DPRINTK("Could not find domain %d\n", op->dom);
119 op->status = GNTST_bad_domain;
120 return;
121 }
123 /* Get a maptrack handle. */
124 if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
125 {
126 int i;
127 struct grant_mapping *new_mt;
128 struct grant_table *lgt = ld->grant_table;
130 if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
131 {
132 put_domain(rd);
133 DPRINTK("Maptrack table is at maximum size.\n");
134 op->status = GNTST_no_device_space;
135 return;
136 }
138 /* Grow the maptrack table. */
139 new_mt = alloc_xenheap_pages(lgt->maptrack_order + 1);
140 if ( new_mt == NULL )
141 {
142 put_domain(rd);
143 DPRINTK("No more map handles available.\n");
144 op->status = GNTST_no_device_space;
145 return;
146 }
148 memcpy(new_mt, lgt->maptrack, PAGE_SIZE << lgt->maptrack_order);
149 for ( i = lgt->maptrack_limit; i < (lgt->maptrack_limit << 1); i++ )
150 new_mt[i].ref = i+1;
152 free_xenheap_pages(lgt->maptrack, lgt->maptrack_order);
153 lgt->maptrack = new_mt;
154 lgt->maptrack_order += 1;
155 lgt->maptrack_limit <<= 1;
157 DPRINTK("Doubled maptrack size\n");
158 handle = get_maptrack_handle(ld->grant_table);
159 }
161 act = &rd->grant_table->active[op->ref];
162 sha = &rd->grant_table->shared[op->ref];
164 spin_lock(&rd->grant_table->lock);
166 if ( !act->pin ||
167 (!(op->flags & GNTMAP_readonly) &&
168 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
169 {
170 sflags = sha->flags;
171 sdom = sha->domid;
173 /*
174 * This loop attempts to set the access (reading/writing) flags
175 * in the grant table entry. It tries a cmpxchg on the field
176 * up to five times, and then fails under the assumption that
177 * the guest is misbehaving.
178 */
179 for ( ; ; )
180 {
181 u32 scombo, prev_scombo, new_scombo;
183 if ( unlikely((sflags & GTF_type_mask) != GTF_permit_access) ||
184 unlikely(sdom != led->domain->domain_id) )
185 PIN_FAIL(unlock_out, GNTST_general_error,
186 "Bad flags (%x) or dom (%d). (NB. expected dom %d)\n",
187 sflags, sdom, led->domain->domain_id);
189 /* Merge two 16-bit values into a 32-bit combined update. */
190 /* NB. Endianness! */
191 scombo = ((u32)sdom << 16) | (u32)sflags;
193 new_scombo = scombo | GTF_reading;
194 if ( !(op->flags & GNTMAP_readonly) )
195 {
196 new_scombo |= GTF_writing;
197 if ( unlikely(sflags & GTF_readonly) )
198 PIN_FAIL(unlock_out, GNTST_general_error,
199 "Attempt to write-pin a r/o grant entry.\n");
200 }
202 prev_scombo = cmpxchg((u32 *)&sha->flags, scombo, new_scombo);
204 /* Did the combined update work (did we see what we expected?). */
205 if ( likely(prev_scombo == scombo) )
206 break;
208 if ( retries++ == 4 )
209 PIN_FAIL(unlock_out, GNTST_general_error,
210 "Shared grant entry is unstable.\n");
212 /* Didn't see what we expected. Split out the seen flags & dom. */
213 /* NB. Endianness! */
214 sflags = (u16)prev_scombo;
215 sdom = (u16)(prev_scombo >> 16);
216 }
218 if ( !act->pin )
219 {
220 act->domid = sdom;
221 act->frame = gmfn_to_mfn(rd, sha->frame);
222 }
223 }
224 else if ( (act->pin & 0x80808080U) != 0 )
225 PIN_FAIL(unlock_out, ENOSPC,
226 "Risk of counter overflow %08x\n", act->pin);
228 if ( op->flags & GNTMAP_device_map )
229 act->pin += (op->flags & GNTMAP_readonly) ?
230 GNTPIN_devr_inc : GNTPIN_devw_inc;
231 if ( op->flags & GNTMAP_host_map )
232 act->pin += (op->flags & GNTMAP_readonly) ?
233 GNTPIN_hstr_inc : GNTPIN_hstw_inc;
235 spin_unlock(&rd->grant_table->lock);
237 frame = act->frame;
238 if ( unlikely(!mfn_valid(frame)) ||
239 unlikely(!((op->flags & GNTMAP_readonly) ?
240 get_page(mfn_to_page(frame), rd) :
241 get_page_and_type(mfn_to_page(frame), rd,
242 PGT_writable_page))) )
243 PIN_FAIL(undo_out, GNTST_general_error,
244 "Could not pin the granted frame (%lx)!\n", frame);
246 if ( op->flags & GNTMAP_host_map )
247 {
248 rc = create_grant_host_mapping(op->host_addr, frame, op->flags);
249 if ( rc != GNTST_okay )
250 {
251 if ( !(op->flags & GNTMAP_readonly) )
252 put_page_type(mfn_to_page(frame));
253 put_page(mfn_to_page(frame));
254 goto undo_out;
255 }
257 if ( op->flags & GNTMAP_device_map )
258 {
259 (void)get_page(mfn_to_page(frame), rd);
260 if ( !(op->flags & GNTMAP_readonly) )
261 get_page_type(mfn_to_page(frame), PGT_writable_page);
262 }
263 }
265 TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);
267 ld->grant_table->maptrack[handle].domid = op->dom;
268 ld->grant_table->maptrack[handle].ref = op->ref;
269 ld->grant_table->maptrack[handle].flags = op->flags;
271 op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
272 op->handle = handle;
273 op->status = GNTST_okay;
275 put_domain(rd);
276 return;
278 undo_out:
279 spin_lock(&rd->grant_table->lock);
281 if ( op->flags & GNTMAP_device_map )
282 act->pin -= (op->flags & GNTMAP_readonly) ?
283 GNTPIN_devr_inc : GNTPIN_devw_inc;
284 if ( op->flags & GNTMAP_host_map )
285 act->pin -= (op->flags & GNTMAP_readonly) ?
286 GNTPIN_hstr_inc : GNTPIN_hstw_inc;
288 if ( !(op->flags & GNTMAP_readonly) &&
289 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
290 gnttab_clear_flag(_GTF_writing, &sha->flags);
292 if ( !act->pin )
293 gnttab_clear_flag(_GTF_reading, &sha->flags);
295 unlock_out:
296 spin_unlock(&rd->grant_table->lock);
297 op->status = rc;
298 put_maptrack_handle(ld->grant_table, handle);
299 put_domain(rd);
300 }
302 static long
303 gnttab_map_grant_ref(
304 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
305 {
306 int i;
307 struct gnttab_map_grant_ref op;
309 for ( i = 0; i < count; i++ )
310 {
311 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
312 return -EFAULT;
313 __gnttab_map_grant_ref(&op);
314 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
315 return -EFAULT;
316 }
318 return 0;
319 }
/*
 * Undo a single grant mapping previously set up by __gnttab_map_grant_ref(),
 * identified by op->handle into the caller's maptrack table.  Decrements the
 * relevant device/host pin subcounts, drops the page references taken at map
 * time, frees the maptrack handle once both mapping kinds are gone, and
 * clears GTF_writing / GTF_reading in the granter's shared entry when the
 * last writable / last remaining pin disappears.  Result in op->status.
 */
static void
__gnttab_unmap_grant_ref(
    struct gnttab_unmap_grant_ref *op)
{
    domid_t dom;
    grant_ref_t ref;
    struct domain *ld, *rd;
    struct active_grant_entry *act;
    grant_entry_t *sha;
    struct grant_mapping *map;
    u16 flags;
    s16 rc = 0;
    unsigned long frame;

    ld = current->domain;

    /* Frame the caller claims was mapped; 0 means "use the recorded frame". */
    frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);

    /*
     * NOTE(review): 'map' is computed before op->handle is range-checked;
     * it is not dereferenced until after the check below short-circuits,
     * but the ordering is worth keeping in mind.
     */
    map = &ld->grant_table->maptrack[op->handle];

    if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) ||
         unlikely(!map->flags) )
    {
        DPRINTK("Bad handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    /* Snapshot the mapping record before we start tearing it down. */
    dom = map->domid;
    ref = map->ref;
    flags = map->flags;

    if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
         unlikely(ld == rd) )
    {
        if ( rd != NULL )
            put_domain(rd);
        DPRINTK("Could not find domain %d\n", dom);
        op->status = GNTST_bad_domain;
        return;
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);

    act = &rd->grant_table->active[ref];
    sha = &rd->grant_table->shared[ref];

    spin_lock(&rd->grant_table->lock);

    if ( frame == 0 )
    {
        frame = act->frame;
    }
    else
    {
        /* Caller-supplied frame must match what we recorded at map time. */
        if ( unlikely(frame != act->frame) )
            PIN_FAIL(unmap_out, GNTST_general_error,
                     "Bad frame number doesn't match gntref.\n");

        /* Tear down the device mapping: drop pin subcount and page ref(s). */
        if ( flags & GNTMAP_device_map )
        {
            ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
            map->flags &= ~GNTMAP_device_map;
            if ( flags & GNTMAP_readonly )
            {
                act->pin -= GNTPIN_devr_inc;
                put_page(mfn_to_page(frame));
            }
            else
            {
                act->pin -= GNTPIN_devw_inc;
                put_page_and_type(mfn_to_page(frame));
            }
        }
    }

    /* Tear down the host (page-table) mapping, if one was requested. */
    if ( (op->host_addr != 0) && (flags & GNTMAP_host_map) )
    {
        if ( (rc = destroy_grant_host_mapping(op->host_addr,
                                              frame, flags)) < 0 )
            goto unmap_out;

        ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
        map->flags &= ~GNTMAP_host_map;
        if ( flags & GNTMAP_readonly )
        {
            act->pin -= GNTPIN_hstr_inc;
            put_page(mfn_to_page(frame));
        }
        else
        {
            act->pin -= GNTPIN_hstw_inc;
            put_page_and_type(mfn_to_page(frame));
        }
    }

    /* Both mapping kinds gone: recycle the maptrack handle. */
    if ( (map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
    {
        map->flags = 0;
        put_maptrack_handle(ld->grant_table, op->handle);
    }

    /* If just unmapped a writable mapping, mark as dirtied */
    if ( !(flags & GNTMAP_readonly) )
        gnttab_log_dirty(rd, frame);

    /* Clear shared-entry access flags once the corresponding pins are gone. */
    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
         !(flags & GNTMAP_readonly) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( act->pin == 0 )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unmap_out:
    op->status = rc;
    spin_unlock(&rd->grant_table->lock);
    put_domain(rd);
}
439 static long
440 gnttab_unmap_grant_ref(
441 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
442 {
443 int i;
444 struct gnttab_unmap_grant_ref op;
446 for ( i = 0; i < count; i++ )
447 {
448 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
449 goto fault;
450 __gnttab_unmap_grant_ref(&op);
451 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
452 goto fault;
453 }
455 flush_tlb_mask(current->domain->domain_dirty_cpumask);
456 return 0;
458 fault:
459 flush_tlb_mask(current->domain->domain_dirty_cpumask);
460 return -EFAULT;
461 }
/*
 * GNTTABOP_setup_table: report the guest-visible frame numbers of a
 * domain's shared grant-table frames.  Only a single op is accepted
 * (count must be 1).  Targeting a foreign domain requires a privileged
 * caller.  The per-op result is returned in op.status; the hypercall
 * itself returns 0 or -EINVAL/-EFAULT.
 */
static long
gnttab_setup_table(
    XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
{
    struct gnttab_setup_table op;
    struct domain *d;
    int i;
    unsigned long gmfn;
    domid_t dom;

    if ( count != 1 )
        return -EINVAL;

    if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
    {
        DPRINTK("Fault while reading gnttab_setup_table_t.\n");
        return -EFAULT;
    }

    /* Guest-supplied frame count is bounded by the static table size. */
    if ( unlikely(op.nr_frames > NR_GRANT_FRAMES) )
    {
        DPRINTK("Xen only supports up to %d grant-table frames per domain.\n",
                NR_GRANT_FRAMES);
        op.status = GNTST_general_error;
        goto out;
    }

    dom = op.dom;
    if ( dom == DOMID_SELF )
    {
        dom = current->domain->domain_id;
    }
    else if ( unlikely(!IS_PRIV(current->domain)) )
    {
        /* Foreign-domain setup is restricted to privileged domains. */
        op.status = GNTST_permission_denied;
        goto out;
    }

    if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
    {
        DPRINTK("Bad domid %d.\n", dom);
        op.status = GNTST_bad_domain;
        goto out;
    }

    ASSERT(d->grant_table != NULL);
    op.status = GNTST_okay;
    /* Copy-out faults here are deliberately ignored (best effort). */
    for ( i = 0; i < op.nr_frames; i++ )
    {
        gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
        (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
    }

    put_domain(d);

 out:
    if ( unlikely(copy_to_guest(uop, &op, 1)) )
        return -EFAULT;

    return 0;
}
525 /*
526 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
527 * ownership of a page frame. If so, lock down the grant entry.
528 */
/*
 * Check that the grant entry (rd, ref) currently permits 'ld' to transfer
 * page ownership, and atomically mark it GTF_transfer_committed so that
 * the granter cannot retract it mid-transfer.  Returns 1 on success,
 * 0 on any failure (bad table/ref, wrong flags/domid, unstable entry).
 */
static int
gnttab_prepare_for_transfer(
    struct domain *rd, struct domain *ld, grant_ref_t ref)
{
    struct grant_table *rgt;
    struct grant_entry *sha;
    domid_t sdom;
    u16 sflags;
    u32 scombo, prev_scombo;
    int retries = 0;

    if ( unlikely((rgt = rd->grant_table) == NULL) ||
         unlikely(ref >= NR_GRANT_ENTRIES) )
    {
        DPRINTK("Dom %d has no g.t., or ref is bad (%d).\n",
                rd->domain_id, ref);
        return 0;
    }

    spin_lock(&rgt->lock);

    sha = &rgt->shared[ref];

    sflags = sha->flags;
    sdom = sha->domid;

    /*
     * Bounded cmpxchg retry loop: the guest shares this memory and can
     * race our update, so give up after a few attempts (cf. the map path).
     */
    for ( ; ; )
    {
        /* Flags must be exactly GTF_accept_transfer, naming 'ld'. */
        if ( unlikely(sflags != GTF_accept_transfer) ||
             unlikely(sdom != ld->domain_id) )
        {
            DPRINTK("Bad flags (%x) or dom (%d). (NB. expected dom %d)\n",
                    sflags, sdom, ld->domain_id);
            goto fail;
        }

        /* Merge two 16-bit values into a 32-bit combined update. */
        /* NB. Endianness! */
        scombo = ((u32)sdom << 16) | (u32)sflags;

        prev_scombo = cmpxchg((u32 *)&sha->flags, scombo,
                              scombo | GTF_transfer_committed);

        /* Did the combined update work (did we see what we expected?). */
        if ( likely(prev_scombo == scombo) )
            break;

        if ( retries++ == 4 )
        {
            DPRINTK("Shared grant entry is unstable.\n");
            goto fail;
        }

        /* Didn't see what we expected. Split out the seen flags & dom. */
        /* NB. Endianness! */
        sflags = (u16)prev_scombo;
        sdom = (u16)(prev_scombo >> 16);
    }

    spin_unlock(&rgt->lock);
    return 1;

 fail:
    spin_unlock(&rgt->lock);
    return 0;
}
/*
 * GNTTABOP_transfer: move ownership of caller-owned page frames to the
 * domains named in each request, against a matching GTF_accept_transfer
 * grant entry.  For each op: validate the frame, steal it from the caller,
 * check the transferee's reservation headroom and grant entry, add the
 * page to the transferee, and publish the new frame via the shared entry
 * with GTF_transfer_completed.  Per-op result in gop.status; the hypercall
 * returns 0 or -EFAULT on a guest copy fault.
 */
static long
gnttab_transfer(
    XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
{
    struct domain *d = current->domain;
    struct domain *e;
    struct page_info *page;
    int i;
    grant_entry_t *sha;
    struct gnttab_transfer gop;
    unsigned long mfn;

    for ( i = 0; i < count; i++ )
    {
        /* Read from caller address space. */
        if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
        {
            DPRINTK("gnttab_transfer: error reading req %d/%d\n", i, count);
            return -EFAULT;
        }

        mfn = gmfn_to_mfn(d, gop.mfn);

        /* Check the passed page frame for basic validity. */
        if ( unlikely(!mfn_valid(mfn)) )
        {
            DPRINTK("gnttab_transfer: out-of-range %lx\n",
                    (unsigned long)gop.mfn);
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Xen-heap frames must never be handed to a guest. */
        page = mfn_to_page(mfn);
        if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
        {
            DPRINTK("gnttab_transfer: xen frame %lx\n",
                    (unsigned long)gop.mfn);
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Detach the page from the caller's ownership. */
        if ( steal_page(d, page, 0) < 0 )
        {
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Find the target domain. */
        if ( unlikely((e = find_domain_by_id(gop.domid)) == NULL) )
        {
            DPRINTK("gnttab_transfer: can't find domain %d\n", gop.domid);
            /* Stolen page is returned to the free pool, not to 'd'. */
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            gop.status = GNTST_bad_domain;
            goto copyback;
        }

        spin_lock(&e->page_alloc_lock);

        /*
         * Check that 'e' will accept the page and has reservation
         * headroom.  Also, a domain mustn't have PGC_allocated
         * pages when it is dying.
         */
        if ( unlikely(test_bit(_DOMF_dying, &e->domain_flags)) ||
             unlikely(e->tot_pages >= e->max_pages) ||
             unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
        {
            if ( !test_bit(_DOMF_dying, &e->domain_flags) )
                DPRINTK("gnttab_transfer: Transferee has no reservation "
                        "headroom (%d,%d) or provided a bad grant ref (%08x) "
                        "or is dying (%lx)\n",
                        e->tot_pages, e->max_pages, gop.ref, e->domain_flags);
            spin_unlock(&e->page_alloc_lock);
            put_domain(e);
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            gop.status = GNTST_general_error;
            goto copyback;
        }

        /* Okay, add the page to 'e'. */
        if ( unlikely(e->tot_pages++ == 0) )
            get_knownalive_domain(e);
        list_add_tail(&page->list, &e->page_list);
        page_set_owner(page, e);

        spin_unlock(&e->page_alloc_lock);

        TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);

        /* Tell the guest about its new page frame. */
        sha = &e->grant_table->shared[gop.ref];
        guest_physmap_add_page(e, sha->frame, mfn);
        sha->frame = mfn;
        /* Frame must be visible before the completion flag (guest reads it). */
        wmb();
        sha->flags |= GTF_transfer_completed;

        put_domain(e);

        gop.status = GNTST_okay;

    copyback:
        if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
        {
            DPRINTK("gnttab_transfer: error writing resp %d/%d\n", i, count);
            return -EFAULT;
        }
    }

    return 0;
}
709 long
710 do_grant_table_op(
711 unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
712 {
713 long rc;
714 struct domain *d = current->domain;
716 if ( count > 512 )
717 return -EINVAL;
719 LOCK_BIGLOCK(d);
721 sync_pagetable_state(d);
723 rc = -EFAULT;
724 switch ( cmd )
725 {
726 case GNTTABOP_map_grant_ref:
727 {
728 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
729 guest_handle_cast(uop, gnttab_map_grant_ref_t);
730 if ( unlikely(!guest_handle_okay(map, count)) )
731 goto out;
732 rc = gnttab_map_grant_ref(map, count);
733 break;
734 }
735 case GNTTABOP_unmap_grant_ref:
736 {
737 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
738 guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
739 if ( unlikely(!guest_handle_okay(unmap, count)) )
740 goto out;
741 rc = gnttab_unmap_grant_ref(unmap, count);
742 break;
743 }
744 case GNTTABOP_setup_table:
745 {
746 rc = gnttab_setup_table(
747 guest_handle_cast(uop, gnttab_setup_table_t), count);
748 break;
749 }
750 case GNTTABOP_transfer:
751 {
752 XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
753 guest_handle_cast(uop, gnttab_transfer_t);
754 if ( unlikely(!guest_handle_okay(transfer, count)) )
755 goto out;
756 rc = gnttab_transfer(transfer, count);
757 break;
758 }
759 default:
760 rc = -ENOSYS;
761 break;
762 }
764 out:
765 UNLOCK_BIGLOCK(d);
767 return rc;
768 }
/*
 * Allocate and initialize domain 'd's grant table: the table struct
 * itself, the active-entry array, one page of maptrack entries (threaded
 * into a free list), and the shared frames exposed to the guest.
 * Installs the table in d->grant_table last, behind a write barrier.
 * Returns 0 on success or -ENOMEM, freeing partial allocations.
 */
int
grant_table_create(
    struct domain *d)
{
    struct grant_table *t;
    int i;

    BUG_ON(MAPTRACK_MAX_ENTRIES < NR_GRANT_ENTRIES);
    if ( (t = xmalloc(struct grant_table)) == NULL )
        goto no_mem;

    /* Simple stuff. */
    memset(t, 0, sizeof(*t));
    spin_lock_init(&t->lock);

    /* Active grant table. */
    t->active = xmalloc_array(struct active_grant_entry, NR_GRANT_ENTRIES);
    if ( t->active == NULL )
        goto no_mem;
    memset(t->active, 0, sizeof(struct active_grant_entry) * NR_GRANT_ENTRIES);

    /* Tracking of mapped foreign frames table */
    if ( (t->maptrack = alloc_xenheap_page()) == NULL )
        goto no_mem;
    t->maptrack_order = 0;
    t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
    memset(t->maptrack, 0, PAGE_SIZE);
    /* Thread every entry onto the free list via its 'ref' field. */
    for ( i = 0; i < t->maptrack_limit; i++ )
        t->maptrack[i].ref = i+1;

    /* Shared grant table. */
    t->shared = alloc_xenheap_pages(ORDER_GRANT_FRAMES);
    if ( t->shared == NULL )
        goto no_mem;
    memset(t->shared, 0, NR_GRANT_FRAMES * PAGE_SIZE);

    for ( i = 0; i < NR_GRANT_FRAMES; i++ )
        gnttab_create_shared_page(d, t, i);

    /* Okay, install the structure. */
    wmb(); /* avoid races with lock-free access to d->grant_table */
    d->grant_table = t;
    return 0;

 no_mem:
    /* Partial-failure cleanup; unallocated members are still NULL here. */
    if ( t != NULL )
    {
        xfree(t->active);
        free_xenheap_page(t->maptrack);
        xfree(t);
    }
    return -ENOMEM;
}
/*
 * Called during domain death: walk every live maptrack entry of dying
 * domain 'd' and release the pins and page references each mapping holds
 * on the granting domain's frames, clearing the shared-entry access flags
 * as the pins drain.  Host-mapping page references are left to page-table
 * teardown (see the commented-out put_page* calls below).
 */
void
gnttab_release_mappings(
    struct domain *d)
{
    struct grant_table *gt = d->grant_table;
    struct grant_mapping *map;
    grant_ref_t ref;
    grant_handle_t handle;
    struct domain *rd;
    struct active_grant_entry *act;
    struct grant_entry *sha;

    /* Only legal on a dying domain. */
    BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags));

    for ( handle = 0; handle < gt->maptrack_limit; handle++ )
    {
        map = &gt->maptrack[handle];
        /* Skip free / already-released entries. */
        if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
            continue;

        ref = map->ref;

        DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
                handle, ref, map->flags, map->domid);

        rd = find_domain_by_id(map->domid);
        BUG_ON(rd == NULL);

        spin_lock(&rd->grant_table->lock);

        act = &rd->grant_table->active[ref];
        sha = &rd->grant_table->shared[ref];

        if ( map->flags & GNTMAP_readonly )
        {
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devr_mask));
                act->pin -= GNTPIN_devr_inc;
                put_page(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstr_mask));
                act->pin -= GNTPIN_hstr_inc;
                /* Done implicitly when page tables are destroyed. */
                /* put_page(mfn_to_page(act->frame)); */
            }
        }
        else
        {
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devw_mask));
                act->pin -= GNTPIN_devw_inc;
                put_page_and_type(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstw_mask));
                act->pin -= GNTPIN_hstw_inc;
                /* Done implicitly when page tables are destroyed. */
                /* put_page_and_type(mfn_to_page(act->frame)); */
            }

            /* Last writable pin gone: allow the granter to see it. */
            if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
                gnttab_clear_flag(_GTF_writing, &sha->flags);
        }

        if ( act->pin == 0 )
            gnttab_clear_flag(_GTF_reading, &sha->flags);

        spin_unlock(&rd->grant_table->lock);

        put_domain(rd);

        map->flags = 0;
    }
}
907 void
908 grant_table_destroy(
909 struct domain *d)
910 {
911 struct grant_table *t = d->grant_table;
913 if ( t == NULL )
914 return;
916 free_xenheap_pages(t->shared, ORDER_GRANT_FRAMES);
917 free_xenheap_page(t->maptrack);
918 xfree(t->active);
919 xfree(t);
921 d->grant_table = NULL;
922 }
924 /*
925 * Local variables:
926 * mode: C
927 * c-set-style: "BSD"
928 * c-basic-offset: 4
929 * tab-width: 4
930 * indent-tabs-mode: nil
931 * End:
932 */