ia64/xen-unstable

view xen/common/grant_table.c @ 9581:9ae1f9349b76

Fix the MFN validity check in gnttab_transfer().

Signed-off-by: Steven Smith, sos22@cam.ac.uk
From: Isaku Yamahata <yamahata@valinux.co.jp>
author sos22@douglas.cl.cam.ac.uk
date Tue Apr 04 14:04:32 2006 +0100 (2006-04-04)
parents 4293d6760cef
children 5c477ad95dba
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/lib.h>
28 #include <xen/sched.h>
29 #include <xen/shadow.h>
30 #include <xen/mm.h>
31 #include <xen/trace.h>
32 #include <xen/guest_access.h>
33 #include <acm/acm_hooks.h>
/*
 * Log a diagnostic, set the caller's local 'rc' to the given error code,
 * and jump to the given cleanup label. Relies on an 'rc' variable and the
 * target label being in scope at the expansion site.
 */
#define PIN_FAIL(_lbl, _rc, _f, _a...) \
    do { \
        DPRINTK( _f, ## _a ); \
        rc = (_rc); \
        goto _lbl; \
    } while ( 0 )
42 static inline int
43 get_maptrack_handle(
44 grant_table_t *t)
45 {
46 unsigned int h;
47 if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
48 return -1;
49 t->maptrack_head = t->maptrack[h].ref_and_flags >> MAPTRACK_REF_SHIFT;
50 t->map_count++;
51 return h;
52 }
54 static inline void
55 put_maptrack_handle(
56 grant_table_t *t, int handle)
57 {
58 t->maptrack[handle].ref_and_flags = t->maptrack_head << MAPTRACK_REF_SHIFT;
59 t->maptrack_head = handle;
60 t->map_count--;
61 }
63 /*
64 * Returns 0 if TLB flush / invalidate required by caller.
65 * va will indicate the address to be invalidated.
66 *
67 * addr is _either_ a host virtual address, or the address of the pte to
68 * update, as indicated by the GNTMAP_contains_pte flag.
69 */
70 static void
71 __gnttab_map_grant_ref(
72 struct gnttab_map_grant_ref *op)
73 {
74 struct domain *ld, *rd;
75 struct vcpu *led;
76 int handle;
77 unsigned long frame = 0;
78 int rc = GNTST_okay;
79 active_grant_entry_t *act;
81 /* Entry details from @rd's shared grant table. */
82 grant_entry_t *sha;
83 domid_t sdom;
84 u16 sflags;
86 /*
87 * We bound the number of times we retry CMPXCHG on memory locations that
88 * we share with a guest OS. The reason is that the guest can modify that
89 * location at a higher rate than we can read-modify-CMPXCHG, so the guest
90 * could cause us to livelock. There are a few cases where it is valid for
91 * the guest to race our updates (e.g., to change the GTF_readonly flag),
92 * so we allow a few retries before failing.
93 */
94 int retries = 0;
96 led = current;
97 ld = led->domain;
99 if ( unlikely(op->ref >= NR_GRANT_ENTRIES) ||
100 unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
101 {
102 DPRINTK("Bad ref (%d) or flags (%x).\n", op->ref, op->flags);
103 op->status = GNTST_bad_gntref;
104 return;
105 }
107 if ( acm_pre_grant_map_ref(op->dom) )
108 {
109 op->status = GNTST_permission_denied;
110 return;
111 }
113 if ( unlikely((rd = find_domain_by_id(op->dom)) == NULL) ||
114 unlikely(ld == rd) )
115 {
116 if ( rd != NULL )
117 put_domain(rd);
118 DPRINTK("Could not find domain %d\n", op->dom);
119 op->status = GNTST_bad_domain;
120 return;
121 }
123 /* Get a maptrack handle. */
124 if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
125 {
126 int i;
127 grant_mapping_t *new_mt;
128 grant_table_t *lgt = ld->grant_table;
130 if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
131 {
132 put_domain(rd);
133 DPRINTK("Maptrack table is at maximum size.\n");
134 op->status = GNTST_no_device_space;
135 return;
136 }
138 /* Grow the maptrack table. */
139 new_mt = alloc_xenheap_pages(lgt->maptrack_order + 1);
140 if ( new_mt == NULL )
141 {
142 put_domain(rd);
143 DPRINTK("No more map handles available.\n");
144 op->status = GNTST_no_device_space;
145 return;
146 }
148 memcpy(new_mt, lgt->maptrack, PAGE_SIZE << lgt->maptrack_order);
149 for ( i = lgt->maptrack_limit; i < (lgt->maptrack_limit << 1); i++ )
150 new_mt[i].ref_and_flags = (i+1) << MAPTRACK_REF_SHIFT;
152 free_xenheap_pages(lgt->maptrack, lgt->maptrack_order);
153 lgt->maptrack = new_mt;
154 lgt->maptrack_order += 1;
155 lgt->maptrack_limit <<= 1;
157 DPRINTK("Doubled maptrack size\n");
158 handle = get_maptrack_handle(ld->grant_table);
159 }
161 act = &rd->grant_table->active[op->ref];
162 sha = &rd->grant_table->shared[op->ref];
164 spin_lock(&rd->grant_table->lock);
166 if ( !act->pin ||
167 (!(op->flags & GNTMAP_readonly) &&
168 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
169 {
170 sflags = sha->flags;
171 sdom = sha->domid;
173 /*
174 * This loop attempts to set the access (reading/writing) flags
175 * in the grant table entry. It tries a cmpxchg on the field
176 * up to five times, and then fails under the assumption that
177 * the guest is misbehaving.
178 */
179 for ( ; ; )
180 {
181 u32 scombo, prev_scombo, new_scombo;
183 if ( unlikely((sflags & GTF_type_mask) != GTF_permit_access) ||
184 unlikely(sdom != led->domain->domain_id) )
185 PIN_FAIL(unlock_out, GNTST_general_error,
186 "Bad flags (%x) or dom (%d). (NB. expected dom %d)\n",
187 sflags, sdom, led->domain->domain_id);
189 /* Merge two 16-bit values into a 32-bit combined update. */
190 /* NB. Endianness! */
191 scombo = ((u32)sdom << 16) | (u32)sflags;
193 new_scombo = scombo | GTF_reading;
194 if ( !(op->flags & GNTMAP_readonly) )
195 {
196 new_scombo |= GTF_writing;
197 if ( unlikely(sflags & GTF_readonly) )
198 PIN_FAIL(unlock_out, GNTST_general_error,
199 "Attempt to write-pin a r/o grant entry.\n");
200 }
202 prev_scombo = cmpxchg((u32 *)&sha->flags, scombo, new_scombo);
204 /* Did the combined update work (did we see what we expected?). */
205 if ( likely(prev_scombo == scombo) )
206 break;
208 if ( retries++ == 4 )
209 PIN_FAIL(unlock_out, GNTST_general_error,
210 "Shared grant entry is unstable.\n");
212 /* Didn't see what we expected. Split out the seen flags & dom. */
213 /* NB. Endianness! */
214 sflags = (u16)prev_scombo;
215 sdom = (u16)(prev_scombo >> 16);
216 }
218 if ( !act->pin )
219 {
220 act->domid = sdom;
221 act->frame = gmfn_to_mfn(rd, sha->frame);
222 }
223 }
224 else if ( (act->pin & 0x80808080U) != 0 )
225 PIN_FAIL(unlock_out, ENOSPC,
226 "Risk of counter overflow %08x\n", act->pin);
228 if ( op->flags & GNTMAP_device_map )
229 act->pin += (op->flags & GNTMAP_readonly) ?
230 GNTPIN_devr_inc : GNTPIN_devw_inc;
231 if ( op->flags & GNTMAP_host_map )
232 act->pin += (op->flags & GNTMAP_readonly) ?
233 GNTPIN_hstr_inc : GNTPIN_hstw_inc;
235 spin_unlock(&rd->grant_table->lock);
237 frame = act->frame;
238 if ( unlikely(!mfn_valid(frame)) ||
239 unlikely(!((op->flags & GNTMAP_readonly) ?
240 get_page(mfn_to_page(frame), rd) :
241 get_page_and_type(mfn_to_page(frame), rd,
242 PGT_writable_page))) )
243 PIN_FAIL(undo_out, GNTST_general_error,
244 "Could not pin the granted frame (%lx)!\n", frame);
246 if ( op->flags & GNTMAP_host_map )
247 {
248 rc = create_grant_host_mapping(op->host_addr, frame, op->flags);
249 if ( rc != GNTST_okay )
250 {
251 if ( !(op->flags & GNTMAP_readonly) )
252 put_page_type(mfn_to_page(frame));
253 put_page(mfn_to_page(frame));
254 goto undo_out;
255 }
257 if ( op->flags & GNTMAP_device_map )
258 {
259 (void)get_page(mfn_to_page(frame), rd);
260 if ( !(op->flags & GNTMAP_readonly) )
261 get_page_type(mfn_to_page(frame), PGT_writable_page);
262 }
263 }
265 TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);
267 ld->grant_table->maptrack[handle].domid = op->dom;
268 ld->grant_table->maptrack[handle].ref_and_flags =
269 (op->ref << MAPTRACK_REF_SHIFT) |
270 (op->flags & MAPTRACK_GNTMAP_MASK);
272 op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
273 op->handle = handle;
274 op->status = GNTST_okay;
276 put_domain(rd);
277 return;
279 undo_out:
280 spin_lock(&rd->grant_table->lock);
282 if ( op->flags & GNTMAP_device_map )
283 act->pin -= (op->flags & GNTMAP_readonly) ?
284 GNTPIN_devr_inc : GNTPIN_devw_inc;
285 if ( op->flags & GNTMAP_host_map )
286 act->pin -= (op->flags & GNTMAP_readonly) ?
287 GNTPIN_hstr_inc : GNTPIN_hstw_inc;
289 if ( !(op->flags & GNTMAP_readonly) &&
290 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
291 clear_bit(_GTF_writing, &sha->flags);
293 if ( !act->pin )
294 clear_bit(_GTF_reading, &sha->flags);
296 unlock_out:
297 spin_unlock(&rd->grant_table->lock);
298 op->status = rc;
299 put_maptrack_handle(ld->grant_table, handle);
300 put_domain(rd);
301 }
303 static long
304 gnttab_map_grant_ref(
305 GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
306 {
307 int i;
308 struct gnttab_map_grant_ref op;
310 for ( i = 0; i < count; i++ )
311 {
312 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
313 return -EFAULT;
314 __gnttab_map_grant_ref(&op);
315 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
316 return -EFAULT;
317 }
319 return 0;
320 }
/*
 * Undo a previous grant mapping identified by op->handle: decrement the
 * remote entry's pin counts, release page references, and clear the
 * GTF_writing/GTF_reading bits in the shared entry once no pins of the
 * relevant kind remain. Per-op status is reported via op->status.
 */
static void
__gnttab_unmap_grant_ref(
    struct gnttab_unmap_grant_ref *op)
{
    domid_t dom;
    grant_ref_t ref;
    struct domain *ld, *rd;
    active_grant_entry_t *act;
    grant_entry_t *sha;
    grant_mapping_t *map;
    u16 flags;
    s16 rc = 0;
    unsigned long frame;

    ld = current->domain;

    /* A zero dev_bus_addr means "no device-mapping teardown requested". */
    frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);

    /*
     * Pointer is computed before the bounds check but only dereferenced
     * after it, thanks to || short-circuit evaluation below.
     */
    map = &ld->grant_table->maptrack[op->handle];

    if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) ||
         unlikely(!(map->ref_and_flags & MAPTRACK_GNTMAP_MASK)) )
    {
        DPRINTK("Bad handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    /* Recover the original mapping parameters from the maptrack entry. */
    dom = map->domid;
    ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
    flags = map->ref_and_flags & MAPTRACK_GNTMAP_MASK;

    if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
         unlikely(ld == rd) )
    {
        if ( rd != NULL )
            put_domain(rd);
        DPRINTK("Could not find domain %d\n", dom);
        op->status = GNTST_bad_domain;
        return;
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);

    act = &rd->grant_table->active[ref];
    sha = &rd->grant_table->shared[ref];

    spin_lock(&rd->grant_table->lock);

    if ( frame == 0 )
    {
        /* Caller didn't name a frame: take it from the active entry. */
        frame = act->frame;
    }
    else
    {
        /* Caller-supplied frame must agree with the pinned one. */
        if ( unlikely(frame != act->frame) )
            PIN_FAIL(unmap_out, GNTST_general_error,
                     "Bad frame number doesn't match gntref.\n");
        if ( flags & GNTMAP_device_map )
        {
            ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
            map->ref_and_flags &= ~GNTMAP_device_map;
            if ( flags & GNTMAP_readonly )
            {
                act->pin -= GNTPIN_devr_inc;
                put_page(mfn_to_page(frame));
            }
            else
            {
                act->pin -= GNTPIN_devw_inc;
                put_page_and_type(mfn_to_page(frame));
            }
        }
    }

    if ( (op->host_addr != 0) && (flags & GNTMAP_host_map) )
    {
        /* Tear down the host (PTE/VA) mapping before dropping references. */
        if ( (rc = destroy_grant_host_mapping(op->host_addr,
                                              frame, flags)) < 0 )
            goto unmap_out;

        ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
        map->ref_and_flags &= ~GNTMAP_host_map;
        if ( flags & GNTMAP_readonly )
        {
            act->pin -= GNTPIN_hstr_inc;
            put_page(mfn_to_page(frame));
        }
        else
        {
            act->pin -= GNTPIN_hstw_inc;
            put_page_and_type(mfn_to_page(frame));
        }
    }

    /* Recycle the maptrack entry once neither mapping kind remains. */
    if ( (map->ref_and_flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
    {
        map->ref_and_flags = 0;
        put_maptrack_handle(ld->grant_table, op->handle);
    }

    /* If just unmapped a writable mapping, mark as dirtied */
    if ( !(flags & GNTMAP_readonly) )
        gnttab_log_dirty(rd, frame);

    /* Drop the shared-entry access bits once the last relevant pin goes. */
    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
         !(flags & GNTMAP_readonly) )
        clear_bit(_GTF_writing, &sha->flags);

    if ( act->pin == 0 )
        clear_bit(_GTF_reading, &sha->flags);

 unmap_out:
    op->status = rc;
    spin_unlock(&rd->grant_table->lock);
    put_domain(rd);
}
440 static long
441 gnttab_unmap_grant_ref(
442 GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
443 {
444 int i;
445 struct gnttab_unmap_grant_ref op;
447 for ( i = 0; i < count; i++ )
448 {
449 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
450 goto fault;
451 __gnttab_unmap_grant_ref(&op);
452 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
453 goto fault;
454 }
456 flush_tlb_mask(current->domain->domain_dirty_cpumask);
457 return 0;
459 fault:
460 flush_tlb_mask(current->domain->domain_dirty_cpumask);
461 return -EFAULT;
462 }
464 static long
465 gnttab_setup_table(
466 GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
467 {
468 struct gnttab_setup_table op;
469 struct domain *d;
470 int i;
471 unsigned long gmfn;
472 domid_t dom;
474 if ( count != 1 )
475 return -EINVAL;
477 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
478 {
479 DPRINTK("Fault while reading gnttab_setup_table_t.\n");
480 return -EFAULT;
481 }
483 if ( unlikely(op.nr_frames > NR_GRANT_FRAMES) )
484 {
485 DPRINTK("Xen only supports up to %d grant-table frames per domain.\n",
486 NR_GRANT_FRAMES);
487 op.status = GNTST_general_error;
488 goto out;
489 }
491 dom = op.dom;
492 if ( dom == DOMID_SELF )
493 {
494 dom = current->domain->domain_id;
495 }
496 else if ( unlikely(!IS_PRIV(current->domain)) )
497 {
498 op.status = GNTST_permission_denied;
499 goto out;
500 }
502 if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
503 {
504 DPRINTK("Bad domid %d.\n", dom);
505 op.status = GNTST_bad_domain;
506 goto out;
507 }
509 if ( op.nr_frames <= NR_GRANT_FRAMES )
510 {
511 ASSERT(d->grant_table != NULL);
512 op.status = GNTST_okay;
513 for ( i = 0; i < op.nr_frames; i++ )
514 {
515 gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
516 (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
517 }
518 }
520 put_domain(d);
522 out:
523 if ( unlikely(copy_to_guest(uop, &op, 1)) )
524 return -EFAULT;
526 return 0;
527 }
/*
 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
 * ownership of a page frame. If so, lock down the grant entry.
 * Returns 1 on success (entry marked GTF_transfer_committed), 0 otherwise.
 */
static int
gnttab_prepare_for_transfer(
    struct domain *rd, struct domain *ld, grant_ref_t ref)
{
    grant_table_t *rgt;
    grant_entry_t *sha;
    domid_t sdom;
    u16 sflags;
    u32 scombo, prev_scombo;
    int retries = 0;

    /* The remote domain may not have a grant table at all. */
    if ( unlikely((rgt = rd->grant_table) == NULL) ||
         unlikely(ref >= NR_GRANT_ENTRIES) )
    {
        DPRINTK("Dom %d has no g.t., or ref is bad (%d).\n",
                rd->domain_id, ref);
        return 0;
    }

    spin_lock(&rgt->lock);

    sha = &rgt->shared[ref];

    /* Snapshot the guest-writable flags/domid pair before the CAS loop. */
    sflags = sha->flags;
    sdom = sha->domid;

    for ( ; ; )
    {
        /*
         * The entry must be exactly GTF_accept_transfer (no other bits
         * set) and must name 'ld' as the domain allowed to transfer.
         */
        if ( unlikely(sflags != GTF_accept_transfer) ||
             unlikely(sdom != ld->domain_id) )
        {
            DPRINTK("Bad flags (%x) or dom (%d). (NB. expected dom %d)\n",
                    sflags, sdom, ld->domain_id);
            goto fail;
        }

        /* Merge two 16-bit values into a 32-bit combined update. */
        /* NB. Endianness! */
        scombo = ((u32)sdom << 16) | (u32)sflags;

        /* Atomically mark the entry as committed-to-transfer. */
        prev_scombo = cmpxchg((u32 *)&sha->flags, scombo,
                              scombo | GTF_transfer_committed);

        /* Did the combined update work (did we see what we expected?). */
        if ( likely(prev_scombo == scombo) )
            break;

        /* Bounded retries: the guest can rewrite this field at will. */
        if ( retries++ == 4 )
        {
            DPRINTK("Shared grant entry is unstable.\n");
            goto fail;
        }

        /* Didn't see what we expected. Split out the seen flags & dom. */
        /* NB. Endianness! */
        sflags = (u16)prev_scombo;
        sdom = (u16)(prev_scombo >> 16);
    }

    spin_unlock(&rgt->lock);
    return 1;

 fail:
    spin_unlock(&rgt->lock);
    return 0;
}
/*
 * GNTTABOP_transfer: move ownership of guest-named page frames from the
 * calling domain to remote domains that have advertised matching
 * GTF_accept_transfer grant entries. Per-op status is reported back
 * through the guest array; the hypercall itself only returns -EFAULT on
 * guest-memory copy errors.
 */
static long
gnttab_transfer(
    GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
{
    struct domain *d = current->domain;
    struct domain *e;
    struct page_info *page;
    int i;
    grant_entry_t *sha;
    struct gnttab_transfer gop;
    unsigned long mfn;

    for ( i = 0; i < count; i++ )
    {
        /* Read from caller address space. */
        if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
        {
            DPRINTK("gnttab_transfer: error reading req %d/%d\n", i, count);
            return -EFAULT;
        }

        /* Translate the caller's frame number, then validate the MFN. */
        mfn = gmfn_to_mfn(d, gop.mfn);

        /* Check the passed page frame for basic validity. */
        if ( unlikely(!mfn_valid(mfn)) )
        {
            DPRINTK("gnttab_transfer: out-of-range %lx\n",
                    (unsigned long)gop.mfn);
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Xen-heap frames are never transferable. */
        page = mfn_to_page(mfn);
        if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
        {
            DPRINTK("gnttab_transfer: xen frame %lx\n",
                    (unsigned long)gop.mfn);
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Detach the page from 'd'; from here it must be freed or given. */
        if ( steal_page_for_grant_transfer(d, page) < 0 )
        {
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Find the target domain. */
        if ( unlikely((e = find_domain_by_id(gop.domid)) == NULL) )
        {
            DPRINTK("gnttab_transfer: can't find domain %d\n", gop.domid);
            /* Stolen page cannot be returned; release it instead. */
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            gop.status = GNTST_bad_domain;
            goto copyback;
        }

        spin_lock(&e->page_alloc_lock);

        /*
         * Check that 'e' will accept the page and has reservation
         * headroom. Also, a domain mustn't have PGC_allocated
         * pages when it is dying.
         */
        if ( unlikely(test_bit(_DOMF_dying, &e->domain_flags)) ||
             unlikely(e->tot_pages >= e->max_pages) ||
             unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
        {
            if ( !test_bit(_DOMF_dying, &e->domain_flags) )
                DPRINTK("gnttab_transfer: Transferee has no reservation "
                        "headroom (%d,%d) or provided a bad grant ref (%08x) "
                        "or is dying (%lx)\n",
                        e->tot_pages, e->max_pages, gop.ref, e->domain_flags);
            spin_unlock(&e->page_alloc_lock);
            put_domain(e);
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            gop.status = GNTST_general_error;
            goto copyback;
        }

        /* Okay, add the page to 'e'. */
        if ( unlikely(e->tot_pages++ == 0) )
            get_knownalive_domain(e);
        list_add_tail(&page->list, &e->page_list);
        page_set_owner(page, e);

        spin_unlock(&e->page_alloc_lock);

        TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);

        /* Tell the guest about its new page frame. */
        sha = &e->grant_table->shared[gop.ref];
        guest_physmap_add_page(e, sha->frame, mfn);
        sha->frame = mfn;
        /* Frame must be visible before the completion flag is raised. */
        wmb();
        sha->flags |= GTF_transfer_completed;

        put_domain(e);

        gop.status = GNTST_okay;

    copyback:
        if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
        {
            DPRINTK("gnttab_transfer: error writing resp %d/%d\n", i, count);
            return -EFAULT;
        }
    }

    return 0;
}
713 long
714 do_grant_table_op(
715 unsigned int cmd, GUEST_HANDLE(void) uop, unsigned int count)
716 {
717 long rc;
718 struct domain *d = current->domain;
720 if ( count > 512 )
721 return -EINVAL;
723 LOCK_BIGLOCK(d);
725 sync_pagetable_state(d);
727 rc = -EFAULT;
728 switch ( cmd )
729 {
730 case GNTTABOP_map_grant_ref:
731 {
732 GUEST_HANDLE(gnttab_map_grant_ref_t) map =
733 guest_handle_cast(uop, gnttab_map_grant_ref_t);
734 if ( unlikely(!guest_handle_okay(map, count)) )
735 goto out;
736 rc = gnttab_map_grant_ref(map, count);
737 break;
738 }
739 case GNTTABOP_unmap_grant_ref:
740 {
741 GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
742 guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
743 if ( unlikely(!guest_handle_okay(unmap, count)) )
744 goto out;
745 rc = gnttab_unmap_grant_ref(unmap, count);
746 break;
747 }
748 case GNTTABOP_setup_table:
749 {
750 rc = gnttab_setup_table(
751 guest_handle_cast(uop, gnttab_setup_table_t), count);
752 break;
753 }
754 case GNTTABOP_transfer:
755 {
756 GUEST_HANDLE(gnttab_transfer_t) transfer =
757 guest_handle_cast(uop, gnttab_transfer_t);
758 if ( unlikely(!guest_handle_okay(transfer, count)) )
759 goto out;
760 rc = gnttab_transfer(transfer, count);
761 break;
762 }
763 default:
764 rc = -ENOSYS;
765 break;
766 }
768 out:
769 UNLOCK_BIGLOCK(d);
771 return rc;
772 }
774 int
775 grant_table_create(
776 struct domain *d)
777 {
778 grant_table_t *t;
779 int i;
781 if ( (t = xmalloc(grant_table_t)) == NULL )
782 goto no_mem;
784 /* Simple stuff. */
785 memset(t, 0, sizeof(*t));
786 spin_lock_init(&t->lock);
788 /* Active grant table. */
789 if ( (t->active = xmalloc_array(active_grant_entry_t, NR_GRANT_ENTRIES))
790 == NULL )
791 goto no_mem;
792 memset(t->active, 0, sizeof(active_grant_entry_t) * NR_GRANT_ENTRIES);
794 /* Tracking of mapped foreign frames table */
795 if ( (t->maptrack = alloc_xenheap_page()) == NULL )
796 goto no_mem;
797 t->maptrack_order = 0;
798 t->maptrack_limit = PAGE_SIZE / sizeof(grant_mapping_t);
799 memset(t->maptrack, 0, PAGE_SIZE);
800 for ( i = 0; i < t->maptrack_limit; i++ )
801 t->maptrack[i].ref_and_flags = (i+1) << MAPTRACK_REF_SHIFT;
803 /* Shared grant table. */
804 t->shared = alloc_xenheap_pages(ORDER_GRANT_FRAMES);
805 if ( t->shared == NULL )
806 goto no_mem;
807 memset(t->shared, 0, NR_GRANT_FRAMES * PAGE_SIZE);
809 for ( i = 0; i < NR_GRANT_FRAMES; i++ )
810 gnttab_create_shared_page(d, t, i);
812 /* Okay, install the structure. */
813 wmb(); /* avoid races with lock-free access to d->grant_table */
814 d->grant_table = t;
815 return 0;
817 no_mem:
818 if ( t != NULL )
819 {
820 xfree(t->active);
821 free_xenheap_page(t->maptrack);
822 xfree(t);
823 }
824 return -ENOMEM;
825 }
/*
 * Tear down every grant mapping still tracked by a dying domain @d,
 * reversing the pin counts and references taken in __gnttab_map_grant_ref.
 * Host-mapping page references are released implicitly when the domain's
 * page tables are destroyed, so only the accounting is undone here.
 */
void
gnttab_release_mappings(
    struct domain *d)
{
    grant_table_t *gt = d->grant_table;
    grant_mapping_t *map;
    grant_ref_t ref;
    grant_handle_t handle;
    struct domain *rd;
    active_grant_entry_t *act;
    grant_entry_t *sha;

    /* Only legal once the domain has been flagged as dying. */
    BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags));

    for ( handle = 0; handle < gt->maptrack_limit; handle++ )
    {
        map = &gt->maptrack[handle];
        /* Skip entries with no live device or host mapping. */
        if ( !(map->ref_and_flags & (GNTMAP_device_map|GNTMAP_host_map)) )
            continue;

        ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;

        DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
                handle, ref, map->ref_and_flags & MAPTRACK_GNTMAP_MASK,
                map->domid);

        /* The granting domain must still exist while mappings reference it. */
        rd = find_domain_by_id(map->domid);
        BUG_ON(rd == NULL);

        spin_lock(&rd->grant_table->lock);

        act = &rd->grant_table->active[ref];
        sha = &rd->grant_table->shared[ref];

        if ( map->ref_and_flags & GNTMAP_readonly )
        {
            if ( map->ref_and_flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devr_mask));
                act->pin -= GNTPIN_devr_inc;
                put_page(mfn_to_page(act->frame));
            }

            if ( map->ref_and_flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstr_mask));
                act->pin -= GNTPIN_hstr_inc;
                /* Done implicitly when page tables are destroyed. */
                /* put_page(mfn_to_page(act->frame)); */
            }
        }
        else
        {
            if ( map->ref_and_flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devw_mask));
                act->pin -= GNTPIN_devw_inc;
                put_page_and_type(mfn_to_page(act->frame));
            }

            if ( map->ref_and_flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstw_mask));
                act->pin -= GNTPIN_hstw_inc;
                /* Done implicitly when page tables are destroyed. */
                /* put_page_and_type(mfn_to_page(act->frame)); */
            }

            /* Last write pin gone: allow the granter to see it unpinned. */
            if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
                clear_bit(_GTF_writing, &sha->flags);
        }

        if ( act->pin == 0 )
            clear_bit(_GTF_reading, &sha->flags);

        spin_unlock(&rd->grant_table->lock);

        put_domain(rd);

        map->ref_and_flags = 0;
    }
}
911 void
912 grant_table_destroy(
913 struct domain *d)
914 {
915 grant_table_t *t = d->grant_table;
917 if ( t == NULL )
918 return;
920 free_xenheap_pages(t->shared, ORDER_GRANT_FRAMES);
921 free_xenheap_page(t->maptrack);
922 xfree(t->active);
923 xfree(t);
925 d->grant_table = NULL;
926 }
928 /*
929 * Local variables:
930 * mode: C
931 * c-set-style: "BSD"
932 * c-basic-offset: 4
933 * tab-width: 4
934 * indent-tabs-mode: nil
935 * End:
936 */