ia64/xen-unstable

view xen/common/grant_table.c @ 14196:9d36026b1b43

xen: Cleanups and bug fixes after the rcu_lock_domain patch.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Mar 01 11:38:55 2007 +0000 (2007-03-01)
parents 09a9b6d6c356
children 035d41b6c94c
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005-2006 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/config.h>
28 #include <xen/iocap.h>
29 #include <xen/lib.h>
30 #include <xen/sched.h>
31 #include <xen/shadow.h>
32 #include <xen/mm.h>
33 #include <xen/trace.h>
34 #include <xen/guest_access.h>
35 #include <xen/domain_page.h>
36 #include <acm/acm_hooks.h>
/* Upper bound on shared grant-table frames per domain; tunable at boot
 * via the "gnttab_max_nr_frames" command-line parameter. */
unsigned int max_nr_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
integer_param("gnttab_max_nr_frames", max_nr_grant_frames);

/* The maximum number of grant mappings is defined as a multiplier of the
 * maximum number of grant table entries. This defines the multiplier used.
 * Pretty arbitrary. [POLICY]
 */
#define MAX_MAPTRACK_TO_GRANTS_RATIO 8
/*
 * The first two members of a grant entry are updated as a combined pair.
 * The following union allows that to happen in an endian-neutral fashion.
 */
union grant_combo {
    uint32_t word;          /* flags+domid viewed as one 32-bit cmpxchg unit */
    struct {
        uint16_t flags;     /* GTF_* permission/status bits */
        domid_t  domid;     /* domain granted access by this entry */
    } shorts;
};
/* Log a warning, set the enclosing function's local 'rc' to _rc, and jump
 * to label _lbl.  Relies on 'rc' and _lbl existing in the caller's scope. */
#define PIN_FAIL(_lbl, _rc, _f, _a...)          \
    do {                                        \
        gdprintk(XENLOG_WARNING, _f, ## _a );   \
        rc = (_rc);                             \
        goto _lbl;                              \
    } while ( 0 )

/* Number of maptrack entries that fit in one xenheap page. */
#define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
/* Index entry 'e' in the two-level (page-array) maptrack table of 't'. */
#define maptrack_entry(t, e) \
    ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
70 static inline unsigned int
71 nr_maptrack_frames(struct grant_table *t)
72 {
73 return t->maptrack_limit / MAPTRACK_PER_PAGE;
74 }
76 static unsigned inline int max_nr_maptrack_frames(void)
77 {
78 return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
79 }
81 static inline unsigned int
82 num_act_frames_from_sha_frames(const unsigned int num)
83 {
84 /* How many frames are needed for the active grant table,
85 * given the size of the shared grant table?
86 *
87 * act_per_page = PAGE_SIZE / sizeof(active_grant_entry_t);
88 * sha_per_page = PAGE_SIZE / sizeof(grant_entry_t);
89 * num_sha_entries = num * sha_per_page;
90 * num_act_frames = (num_sha_entries + (act_per_page-1)) / act_per_page;
91 */
92 return ((num * (PAGE_SIZE / sizeof(grant_entry_t))) +
93 ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
94 / (PAGE_SIZE / sizeof(struct active_grant_entry));
95 }
/* Frames needed for the active table at @gt's current shared-table size. */
static inline unsigned int
nr_active_grant_frames(struct grant_table *gt)
{
    unsigned int sha_frames = nr_grant_frames(gt);

    return num_act_frames_from_sha_frames(sha_frames);
}
/* Shared grant entries per page, and two-level lookup of entry 'e'. */
#define SHGNT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_t))
#define shared_entry(t, e) \
    ((t)->shared[(e)/SHGNT_PER_PAGE][(e)%SHGNT_PER_PAGE])
/* Active grant entries per page, and two-level lookup of entry 'e'. */
#define ACGNT_PER_PAGE (PAGE_SIZE / sizeof(struct active_grant_entry))
#define active_entry(t, e) \
    ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
110 static inline int
111 __get_maptrack_handle(
112 struct grant_table *t)
113 {
114 unsigned int h;
115 if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
116 return -1;
117 t->maptrack_head = maptrack_entry(t, h).ref;
118 t->map_count++;
119 return h;
120 }
122 static inline void
123 put_maptrack_handle(
124 struct grant_table *t, int handle)
125 {
126 maptrack_entry(t, handle).ref = t->maptrack_head;
127 t->maptrack_head = handle;
128 t->map_count--;
129 }
/*
 * Allocate a maptrack handle from @lgt, growing the maptrack table by one
 * page (bounded by max_nr_maptrack_frames()) when the free list is empty.
 * Returns the handle, or -1 on exhaustion or allocation failure.
 */
static inline int
get_maptrack_handle(
    struct grant_table *lgt)
{
    int i;
    grant_handle_t handle;
    struct grant_mapping *new_mt;
    unsigned int new_mt_limit, nr_frames;

    /* Fast path is done without the lock; retry under the lock below. */
    if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
    {
        spin_lock(&lgt->lock);

        /* Re-check: another vcpu may have freed or added entries meanwhile. */
        if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
        {
            nr_frames = nr_maptrack_frames(lgt);
            if ( nr_frames >= max_nr_maptrack_frames() )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            new_mt = alloc_xenheap_page();
            if ( new_mt == NULL )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            memset(new_mt, 0, PAGE_SIZE);

            new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;

            /* Thread the new entries onto the free list: each points at
             * the next handle index. */
            for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
            {
                new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
                new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
            }

            /* Publish the new page before raising the limit. */
            lgt->maptrack[nr_frames] = new_mt;
            lgt->maptrack_limit = new_mt_limit;

            gdprintk(XENLOG_INFO,
                     "Increased maptrack size to %u frames.\n", nr_frames + 1);
            handle = __get_maptrack_handle(lgt);
        }

        spin_unlock(&lgt->lock);
    }
    return handle;
}
/*
 * Returns 0 if TLB flush / invalidate required by caller.
 * va will indicate the address to be invalidated.
 *
 * addr is _either_ a host virtual address, or the address of the pte to
 * update, as indicated by the GNTMAP_contains_pte flag.
 *
 * NOTE(review): the "Returns 0" text above looks stale — this function
 * returns void and reports status via op->status; confirm against callers.
 */
static void
__gnttab_map_grant_ref(
    struct gnttab_map_grant_ref *op)
{
    struct domain *ld, *rd;           /* local (mapping) / remote (granting) */
    struct vcpu   *led;               /* local executing vcpu */
    int handle;                       /* maptrack handle for this mapping */
    unsigned long frame = 0;          /* mfn backing the grant */
    int rc = GNTST_okay;
    struct active_grant_entry *act;
    struct grant_mapping *mt;
    grant_entry_t *sha;
    union grant_combo scombo, prev_scombo, new_scombo;

    /*
     * We bound the number of times we retry CMPXCHG on memory locations that
     * we share with a guest OS. The reason is that the guest can modify that
     * location at a higher rate than we can read-modify-CMPXCHG, so the guest
     * could cause us to livelock. There are a few cases where it is valid for
     * the guest to race our updates (e.g., to change the GTF_readonly flag),
     * so we allow a few retries before failing.
     */
    int retries = 0;

    led = current;
    ld = led->domain;

    /* At least one of device/host mapping must be requested. */
    if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
    {
        gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
        op->status = GNTST_bad_gntref;
        return;
    }

    /* Mandatory access-control hook. */
    if ( acm_pre_grant_map_ref(op->dom) )
    {
        op->status = GNTST_permission_denied;
        return;
    }

    /* Take an RCU reference on the granting domain. */
    if ( unlikely((rd = rcu_lock_domain_by_id(op->dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", op->dom);
        op->status = GNTST_bad_domain;
        return;
    }

    /* Reserve a maptrack slot in the *local* table to record the mapping. */
    if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
    {
        rcu_unlock_domain(rd);
        gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
        op->status = GNTST_no_device_space;
        return;
    }

    spin_lock(&rd->grant_table->lock);

    /* Bounds check on the grant ref */
    if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
        PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    /* If already pinned, check the active domid and avoid refcnt overflow. */
    if ( act->pin &&
         ((act->domid != ld->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, ld->domain_id, act->pin);

    /* Only touch the shared flags when this is the first pin, or the first
     * *writable* pin on an entry so far pinned read-only. */
    if ( !act->pin ||
         (!(op->flags & GNTMAP_readonly) &&
          !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /*
         * This loop attempts to set the access (reading/writing) flags
         * in the grant table entry.  It tries a cmpxchg on the field
         * up to five times, and then fails under the assumption that
         * the guest is misbehaving.
         */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != ld->domain_id)) )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                         scombo.shorts.flags, scombo.shorts.domid,
                         ld->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !(op->flags & GNTMAP_readonly) )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            /* Atomically publish the new flags; retry if the guest raced. */
            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        /* First pin: latch the granting domid and translate the frame. */
        if ( !act->pin )
        {
            act->domid = scombo.shorts.domid;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    /* Account the new pin(s) in the active entry. */
    if ( op->flags & GNTMAP_device_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    frame = act->frame;

    spin_unlock(&rd->grant_table->lock);

    /* Take page (and, for writable maps, type) references outside the lock. */
    if ( unlikely(!mfn_valid(frame)) ||
         unlikely(!((op->flags & GNTMAP_readonly) ?
                    get_page(mfn_to_page(frame), rd) :
                    get_page_and_type(mfn_to_page(frame), rd,
                                      PGT_writable_page))) )
    {
        if ( !test_bit(_DOMF_dying, &rd->domain_flags) )
            gdprintk(XENLOG_WARNING, "Could not pin grant frame %lx\n", frame);
        rc = GNTST_general_error;
        goto undo_out;
    }

    if ( op->flags & GNTMAP_host_map )
    {
        rc = create_grant_host_mapping(op->host_addr, frame, op->flags);
        if ( rc != GNTST_okay )
        {
            /* Drop the references taken above before unwinding the pins. */
            if ( !(op->flags & GNTMAP_readonly) )
                put_page_type(mfn_to_page(frame));
            put_page(mfn_to_page(frame));
            goto undo_out;
        }

        /* Combined host+device map: one extra ref (and type ref) for the
         * device side. */
        if ( op->flags & GNTMAP_device_map )
        {
            (void)get_page(mfn_to_page(frame), rd);
            if ( !(op->flags & GNTMAP_readonly) )
                get_page_type(mfn_to_page(frame), PGT_writable_page);
        }
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);

    /* Record the mapping so it can be found again at unmap time. */
    mt = &maptrack_entry(ld->grant_table, handle);
    mt->domid = op->dom;
    mt->ref   = op->ref;
    mt->flags = op->flags;

    op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
    op->handle       = handle;
    op->status       = GNTST_okay;

    rcu_unlock_domain(rd);
    return;

 undo_out:
    /* Failure after pinning: re-take the lock and reverse the pin counts. */
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    if ( op->flags & GNTMAP_device_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    /* Clear shared flags if this undo removed the last write / any pin. */
    if ( !(op->flags & GNTMAP_readonly) &&
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    op->status = rc;
    put_maptrack_handle(ld->grant_table, handle);
    rcu_unlock_domain(rd);
}
399 static long
400 gnttab_map_grant_ref(
401 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
402 {
403 int i;
404 struct gnttab_map_grant_ref op;
406 for ( i = 0; i < count; i++ )
407 {
408 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
409 return -EFAULT;
410 __gnttab_map_grant_ref(&op);
411 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
412 return -EFAULT;
413 }
415 return 0;
416 }
/*
 * Undo a single grant mapping identified by op->handle: drop the page
 * references taken at map time, tear down any host mapping, and release
 * the maptrack entry once neither map type remains.  Status is returned
 * in op->status.
 */
static void
__gnttab_unmap_grant_ref(
    struct gnttab_unmap_grant_ref *op)
{
    domid_t          dom;
    grant_ref_t      ref;
    struct domain   *ld, *rd;
    struct active_grant_entry *act;
    grant_entry_t   *sha;
    struct grant_mapping *map;
    u16              flags;
    s16              rc = 0;
    unsigned long    frame;

    ld = current->domain;

    /* Caller-supplied frame; 0 means "look it up from the active entry". */
    frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);

    if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
    {
        gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    map = &maptrack_entry(ld->grant_table, op->handle);

    /* flags==0 marks a free/unused maptrack slot. */
    if ( unlikely(!map->flags) )
    {
        gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    dom   = map->domid;
    ref   = map->ref;
    flags = map->flags;

    if ( unlikely((rd = rcu_lock_domain_by_id(dom)) == NULL) )
    {
        /* This can happen when a grant is implicitly unmapped. */
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
        domain_crash(ld); /* naughty... */
        return;
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);

    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, ref);
    sha = &shared_entry(rd->grant_table, ref);

    if ( frame == 0 )
    {
        frame = act->frame;
    }
    else
    {
        /* Explicit frame must agree with what was pinned at map time. */
        if ( unlikely(frame != act->frame) )
            PIN_FAIL(unmap_out, GNTST_general_error,
                     "Bad frame number doesn't match gntref.\n");

        /* A non-zero frame requests the device-mapping teardown. */
        if ( flags & GNTMAP_device_map )
        {
            ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
            map->flags &= ~GNTMAP_device_map;
            if ( flags & GNTMAP_readonly )
            {
                act->pin -= GNTPIN_devr_inc;
                put_page(mfn_to_page(frame));
            }
            else
            {
                act->pin -= GNTPIN_devw_inc;
                put_page_and_type(mfn_to_page(frame));
            }
        }
    }

    /* A non-zero host_addr requests the host-mapping teardown. */
    if ( (op->host_addr != 0) && (flags & GNTMAP_host_map) )
    {
        if ( (rc = destroy_grant_host_mapping(op->host_addr,
                                              frame, flags)) < 0 )
            goto unmap_out;

        ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
        map->flags &= ~GNTMAP_host_map;
        if ( flags & GNTMAP_readonly )
        {
            act->pin -= GNTPIN_hstr_inc;
            put_page(mfn_to_page(frame));
        }
        else
        {
            act->pin -= GNTPIN_hstw_inc;
            put_page_and_type(mfn_to_page(frame));
        }
    }

    /* Neither map type left: free the maptrack slot. */
    if ( (map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
    {
        map->flags = 0;
        put_maptrack_handle(ld->grant_table, op->handle);
    }

    /* If just unmapped a writable mapping, mark as dirtied */
    if ( !(flags & GNTMAP_readonly) )
        gnttab_mark_dirty(rd, frame);

    /* Last writable pin gone: clear GTF_writing in the shared entry. */
    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
         !(flags & GNTMAP_readonly) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    /* Last pin of any kind gone: clear GTF_reading too. */
    if ( act->pin == 0 )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unmap_out:
    op->status = rc;
    spin_unlock(&rd->grant_table->lock);
    rcu_unlock_domain(rd);
}
540 static long
541 gnttab_unmap_grant_ref(
542 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
543 {
544 int i;
545 struct gnttab_unmap_grant_ref op;
547 for ( i = 0; i < count; i++ )
548 {
549 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
550 goto fault;
551 __gnttab_unmap_grant_ref(&op);
552 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
553 goto fault;
554 }
556 flush_tlb_mask(current->domain->domain_dirty_cpumask);
557 return 0;
559 fault:
560 flush_tlb_mask(current->domain->domain_dirty_cpumask);
561 return -EFAULT;
562 }
/*
 * Grow @d's grant table (active and shared parts) to @req_nr_frames
 * frames, sharing the new shared frames with the domain.
 * Returns 1 on success, 0 on allocation failure (table left at its
 * previous size; partial allocations are freed).
 */
int
gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
{
    /* d's grant table lock must be held by the caller */

    struct grant_table *gt = d->grant_table;
    unsigned int i;

    ASSERT(req_nr_frames <= max_nr_grant_frames);

    gdprintk(XENLOG_INFO,
            "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
            d->domain_id, nr_grant_frames(gt), req_nr_frames);

    /* Active */
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
            goto active_alloc_failed;
        memset(gt->active[i], 0, PAGE_SIZE);
    }

    /* Shared */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        if ( (gt->shared[i] = alloc_xenheap_page()) == NULL )
            goto shared_alloc_failed;
        memset(gt->shared[i], 0, PAGE_SIZE);
    }

    /* Share the new shared frames with the recipient domain */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
        gnttab_create_shared_page(d, gt, i);

    /* Only now is the larger size visible to readers. */
    gt->nr_grant_frames = req_nr_frames;

    return 1;

 shared_alloc_failed:
    /* NOTE(review): these unwind loops walk the full target range, so some
     * slots may be NULL (never allocated) — assumes free_xenheap_page(NULL)
     * is a safe no-op; confirm. */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        free_xenheap_page(gt->shared[i]);
        gt->shared[i] = NULL;
    }
 active_alloc_failed:
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        free_xenheap_page(gt->active[i]);
        gt->active[i] = NULL;
    }
    gdprintk(XENLOG_INFO, "Allocation failure when expanding grant table.\n");
    return 0;
}
/*
 * GNTTABOP_setup_table: (possibly) grow the target domain's grant table
 * to op.nr_frames and return the GMFNs of the shared frames to the guest.
 * Only one op per hypercall (@count must be 1).  Per-op status goes in
 * op.status; -EFAULT is returned only for guest-memory access failures.
 */
static long
gnttab_setup_table(
    XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
{
    struct gnttab_setup_table op;
    struct domain *d;
    int            i;
    unsigned long  gmfn;
    domid_t        dom;

    if ( count != 1 )
        return -EINVAL;

    if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
    {
        gdprintk(XENLOG_INFO, "Fault while reading gnttab_setup_table_t.\n");
        return -EFAULT;
    }

    if ( unlikely(op.nr_frames > max_nr_grant_frames) )
    {
        gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
                " per domain.\n",
                max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto out;
    }

    /* Only privileged domains may set up a foreign domain's table. */
    dom = op.dom;
    if ( dom == DOMID_SELF )
    {
        dom = current->domain->domain_id;
    }
    else if ( unlikely(!IS_PRIV(current->domain)) )
    {
        op.status = GNTST_permission_denied;
        goto out;
    }

    if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
        op.status = GNTST_bad_domain;
        goto out;
    }

    spin_lock(&d->grant_table->lock);

    /* Grow only if the request exceeds the current size; shrinking is not
     * supported, so smaller requests just report the existing frames. */
    if ( (op.nr_frames > nr_grant_frames(d->grant_table)) &&
         !gnttab_grow_table(d, op.nr_frames) )
    {
        gdprintk(XENLOG_INFO,
                "Expand grant table to %d failed. Current: %d Max: %d.\n",
                op.nr_frames,
                nr_grant_frames(d->grant_table),
                max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto setup_unlock_out;
    }

    op.status = GNTST_okay;
    for ( i = 0; i < op.nr_frames; i++ )
    {
        gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
        /* NOTE(review): copy failure here is deliberately ignored; the
         * guest still sees GNTST_okay — confirm this is intended. */
        (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
    }

 setup_unlock_out:
    spin_unlock(&d->grant_table->lock);

    rcu_unlock_domain(d);

 out:
    if ( unlikely(copy_to_guest(uop, &op, 1)) )
        return -EFAULT;

    return 0;
}
699 static long
700 gnttab_query_size(
701 XEN_GUEST_HANDLE(gnttab_query_size_t) uop, unsigned int count)
702 {
703 struct gnttab_query_size op;
704 struct domain *d;
705 domid_t dom;
707 if ( count != 1 )
708 return -EINVAL;
710 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
711 {
712 gdprintk(XENLOG_INFO, "Fault while reading gnttab_query_size_t.\n");
713 return -EFAULT;
714 }
716 dom = op.dom;
717 if ( dom == DOMID_SELF )
718 {
719 dom = current->domain->domain_id;
720 }
721 else if ( unlikely(!IS_PRIV(current->domain)) )
722 {
723 op.status = GNTST_permission_denied;
724 goto query_out;
725 }
727 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
728 {
729 gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
730 op.status = GNTST_bad_domain;
731 goto query_out;
732 }
734 spin_lock(&d->grant_table->lock);
736 op.nr_frames = nr_grant_frames(d->grant_table);
737 op.max_nr_frames = max_nr_grant_frames;
738 op.status = GNTST_okay;
740 spin_unlock(&d->grant_table->lock);
742 rcu_unlock_domain(d);
744 query_out:
745 if ( unlikely(copy_to_guest(uop, &op, 1)) )
746 return -EFAULT;
748 return 0;
749 }
/*
 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
 * ownership of a page frame. If so, lock down the grant entry.
 *
 * Returns 1 on success (GTF_transfer_committed set), 0 on failure.
 */
static int
gnttab_prepare_for_transfer(
    struct domain *rd, struct domain *ld, grant_ref_t ref)
{
    struct grant_table *rgt;
    struct grant_entry *sha;
    union grant_combo   scombo, prev_scombo, new_scombo;
    int                 retries = 0;

    if ( unlikely((rgt = rd->grant_table) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Dom %d has no grant table.\n", rd->domain_id);
        return 0;
    }

    spin_lock(&rgt->lock);

    if ( unlikely(ref >= nr_grant_entries(rd->grant_table)) )
    {
        gdprintk(XENLOG_INFO,
                "Bad grant reference (%d) for transfer to domain(%d).\n",
                ref, rd->domain_id);
        goto fail;
    }

    sha = &shared_entry(rgt, ref);

    scombo.word = *(u32 *)&sha->flags;

    /* Bounded cmpxchg retry loop: the guest shares this word, so a
     * misbehaving guest could otherwise livelock us (cf. map path). */
    for ( ; ; )
    {
        /* Entry must be exactly GTF_accept_transfer, targeted at 'ld'. */
        if ( unlikely(scombo.shorts.flags != GTF_accept_transfer) ||
             unlikely(scombo.shorts.domid != ld->domain_id) )
        {
            gdprintk(XENLOG_INFO, "Bad flags (%x) or dom (%d). "
                    "(NB. expected dom %d)\n",
                    scombo.shorts.flags, scombo.shorts.domid,
                    ld->domain_id);
            goto fail;
        }

        new_scombo = scombo;
        new_scombo.shorts.flags |= GTF_transfer_committed;

        prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                   scombo.word, new_scombo.word);
        if ( likely(prev_scombo.word == scombo.word) )
            break;

        if ( retries++ == 4 )
        {
            gdprintk(XENLOG_WARNING, "Shared grant entry is unstable.\n");
            goto fail;
        }

        scombo = prev_scombo;
    }

    spin_unlock(&rgt->lock);
    return 1;

 fail:
    spin_unlock(&rgt->lock);
    return 0;
}
/*
 * GNTTABOP_transfer batch handler: for each element, steal the nominated
 * page from the calling domain and donate it to the target domain via the
 * target's grant entry (gop.ref).  Per-element status is written back to
 * the guest; -EFAULT is returned only for guest-memory access failures.
 */
static long
gnttab_transfer(
    XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
{
    struct domain *d = current->domain;
    struct domain *e;
    struct page_info *page;
    int i;
    grant_entry_t *sha;
    struct gnttab_transfer gop;
    unsigned long mfn;

    for ( i = 0; i < count; i++ )
    {
        /* Read from caller address space. */
        if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
        {
            gdprintk(XENLOG_INFO, "gnttab_transfer: error reading req %d/%d\n",
                    i, count);
            return -EFAULT;
        }

        mfn = gmfn_to_mfn(d, gop.mfn);

        /* Check the passed page frame for basic validity. */
        if ( unlikely(!mfn_valid(mfn)) )
        {
            gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
                    (unsigned long)gop.mfn);
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Xen-heap pages are never transferable. */
        page = mfn_to_page(mfn);
        if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
        {
            gdprintk(XENLOG_INFO, "gnttab_transfer: xen frame %lx\n",
                    (unsigned long)gop.mfn);
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Remove the page from 'd' (ownership passes to us here). */
        if ( steal_page(d, page, 0) < 0 )
        {
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Find the target domain. */
        if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
        {
            gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
                    gop.domid);
            /* Stolen page now belongs to nobody: scrub refs and free it. */
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            gop.status = GNTST_bad_domain;
            goto copyback;
        }

        spin_lock(&e->page_alloc_lock);

        /*
         * Check that 'e' will accept the page and has reservation
         * headroom. Also, a domain mustn't have PGC_allocated
         * pages when it is dying.
         */
        if ( unlikely(test_bit(_DOMF_dying, &e->domain_flags)) ||
             unlikely(e->tot_pages >= e->max_pages) ||
             unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
        {
            if ( !test_bit(_DOMF_dying, &e->domain_flags) )
                gdprintk(XENLOG_INFO, "gnttab_transfer: "
                        "Transferee has no reservation "
                        "headroom (%d,%d) or provided a bad grant ref (%08x) "
                        "or is dying (%lx)\n",
                        e->tot_pages, e->max_pages, gop.ref, e->domain_flags);
            spin_unlock(&e->page_alloc_lock);
            rcu_unlock_domain(e);
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            gop.status = GNTST_general_error;
            goto copyback;
        }

        /* Okay, add the page to 'e'. */
        if ( unlikely(e->tot_pages++ == 0) )
            get_knownalive_domain(e);
        list_add_tail(&page->list, &e->page_list);
        page_set_owner(page, e);

        spin_unlock(&e->page_alloc_lock);

        TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);

        /* Tell the guest about its new page frame. */
        spin_lock(&e->grant_table->lock);

        sha = &shared_entry(e->grant_table, gop.ref);
        guest_physmap_add_page(e, sha->frame, mfn);
        sha->frame = mfn;
        /* Frame must be visible before the completion flag is raised. */
        wmb();
        sha->flags |= GTF_transfer_completed;

        spin_unlock(&e->grant_table->lock);

        rcu_unlock_domain(e);

        gop.status = GNTST_okay;

    copyback:
        if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
        {
            gdprintk(XENLOG_INFO, "gnttab_transfer: error writing resp "
                     "%d/%d\n", i, count);
            return -EFAULT;
        }
    }

    return 0;
}
942 /* Undo __acquire_grant_for_copy. Again, this has no effect on page
943 type and reference counts. */
944 static void
945 __release_grant_for_copy(
946 struct domain *rd, unsigned long gref, int readonly)
947 {
948 grant_entry_t *sha;
949 struct active_grant_entry *act;
950 unsigned long r_frame;
952 spin_lock(&rd->grant_table->lock);
954 act = &active_entry(rd->grant_table, gref);
955 sha = &shared_entry(rd->grant_table, gref);
956 r_frame = act->frame;
958 if ( readonly )
959 {
960 act->pin -= GNTPIN_hstr_inc;
961 }
962 else
963 {
964 gnttab_mark_dirty(rd, r_frame);
966 act->pin -= GNTPIN_hstw_inc;
967 if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) )
968 gnttab_clear_flag(_GTF_writing, &sha->flags);
969 }
971 if ( !act->pin )
972 gnttab_clear_flag(_GTF_reading, &sha->flags);
974 spin_unlock(&rd->grant_table->lock);
975 }
/* Grab a frame number from a grant entry and update the flags and pin
   count as appropriate. Note that this does *not* update the page
   type or reference counts, and does not check that the mfn is
   actually valid.
   Returns a GNTST_* status; on GNTST_okay, *frame holds the pinned mfn.
   (NB: brace lines in this region were dropped by the source viewer and
   have been reconstructed to mirror the identical map-path logic.) */
static int
__acquire_grant_for_copy(
    struct domain *rd, unsigned long gref, int readonly,
    unsigned long *frame)
{
    grant_entry_t *sha;
    struct active_grant_entry *act;
    s16 rc = GNTST_okay;
    int retries = 0;
    union grant_combo scombo, prev_scombo, new_scombo;

    spin_lock(&rd->grant_table->lock);

    if ( unlikely(gref >= nr_grant_entries(rd->grant_table)) )
        PIN_FAIL(unlock_out, GNTST_bad_gntref,
                 "Bad grant reference %ld\n", gref);

    act = &active_entry(rd->grant_table, gref);
    sha = &shared_entry(rd->grant_table, gref);

    /* If already pinned, check the active domid and avoid refcnt overflow. */
    if ( act->pin &&
         ((act->domid != current->domain->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, current->domain->domain_id, act->pin);

    /* Only touch the shared flags on the first pin, or the first writable
     * pin on an entry so far pinned read-only. */
    if ( !act->pin ||
         (!readonly && !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /* Bounded cmpxchg retry loop on the guest-shared flags word. */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != current->domain->domain_id)) )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                         scombo.shorts.flags, scombo.shorts.domid,
                         current->domain->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !readonly )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        /* First pin: latch the granting domid and translate the frame. */
        if ( !act->pin )
        {
            act->domid = scombo.shorts.domid;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    *frame = act->frame;

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    return rc;
}
/*
 * Perform one hypervisor-mediated copy of op->len bytes between two page
 * frames, each nominated either by grant reference or (for DOMID_SELF
 * only) by gmfn.  Status is returned in op->status; all references and
 * RCU locks taken along the way are released on every exit path.
 * (NB: brace lines in this region were dropped by the source viewer and
 * have been reconstructed; statement text is as shown in the dump.)
 */
static void
__gnttab_copy(
    struct gnttab_copy *op)
{
    struct domain *sd = NULL, *dd = NULL;   /* source / destination domains */
    unsigned long s_frame, d_frame;
    char *sp, *dp;
    s16 rc = GNTST_okay;
    int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
    int src_is_gref, dest_is_gref;

    /* The copy must lie entirely within each page. */
    if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
         ((op->dest.offset + op->len) > PAGE_SIZE) )
        PIN_FAIL(error_out, GNTST_bad_copy_arg, "copy beyond page area.\n");

    src_is_gref = op->flags & GNTCOPY_source_gref;
    dest_is_gref = op->flags & GNTCOPY_dest_gref;

    /* Raw-mfn addressing is only permitted for the caller's own pages. */
    if ( (op->source.domid != DOMID_SELF && !src_is_gref ) ||
         (op->dest.domid != DOMID_SELF && !dest_is_gref) )
        PIN_FAIL(error_out, GNTST_permission_denied,
                 "only allow copy-by-mfn for DOMID_SELF.\n");

    if ( op->source.domid == DOMID_SELF )
        sd = rcu_lock_current_domain();
    else if ( (sd = rcu_lock_domain_by_id(op->source.domid)) == NULL )
        PIN_FAIL(error_out, GNTST_bad_domain,
                 "couldn't find %d\n", op->source.domid);

    if ( op->dest.domid == DOMID_SELF )
        dd = rcu_lock_current_domain();
    else if ( (dd = rcu_lock_domain_by_id(op->dest.domid)) == NULL )
        PIN_FAIL(error_out, GNTST_bad_domain,
                 "couldn't find %d\n", op->dest.domid);

    /* Resolve and pin the source frame (read-only). */
    if ( src_is_gref )
    {
        rc = __acquire_grant_for_copy(sd, op->source.u.ref, 1, &s_frame);
        if ( rc != GNTST_okay )
            goto error_out;
        have_s_grant = 1;
    }
    else
    {
        s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
    }
    if ( unlikely(!mfn_valid(s_frame)) )
        PIN_FAIL(error_out, GNTST_general_error,
                 "source frame %lx invalid.\n", s_frame);
    if ( !get_page(mfn_to_page(s_frame), sd) )
    {
        if ( !test_bit(_DOMF_dying, &sd->domain_flags) )
            gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
        rc = GNTST_general_error;
        goto error_out;
    }
    have_s_ref = 1;

    /* Resolve and pin the destination frame (writable). */
    if ( dest_is_gref )
    {
        rc = __acquire_grant_for_copy(dd, op->dest.u.ref, 0, &d_frame);
        if ( rc != GNTST_okay )
            goto error_out;
        have_d_grant = 1;
    }
    else
    {
        d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
    }
    if ( unlikely(!mfn_valid(d_frame)) )
        PIN_FAIL(error_out, GNTST_general_error,
                 "destination frame %lx invalid.\n", d_frame);
    if ( !get_page_and_type(mfn_to_page(d_frame), dd, PGT_writable_page) )
    {
        if ( !test_bit(_DOMF_dying, &dd->domain_flags) )
            gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
        rc = GNTST_general_error;
        goto error_out;
    }

    /* Both frames pinned: do the actual copy via temporary mappings. */
    sp = map_domain_page(s_frame);
    dp = map_domain_page(d_frame);

    memcpy(dp + op->dest.offset, sp + op->source.offset, op->len);

    unmap_domain_page(dp);
    unmap_domain_page(sp);

    gnttab_mark_dirty(dd, d_frame);

    put_page_and_type(mfn_to_page(d_frame));
 error_out:
    /* Common unwind: drop whatever was acquired, in reverse order. */
    if ( have_s_ref )
        put_page(mfn_to_page(s_frame));
    if ( have_s_grant )
        __release_grant_for_copy(sd, op->source.u.ref, 1);
    if ( have_d_grant )
        __release_grant_for_copy(dd, op->dest.u.ref, 0);
    if ( sd )
        rcu_unlock_domain(sd);
    if ( dd )
        rcu_unlock_domain(dd);
    op->status = rc;
}
1170 static long
1171 gnttab_copy(
1172 XEN_GUEST_HANDLE(gnttab_copy_t) uop, unsigned int count)
1174 int i;
1175 struct gnttab_copy op;
1177 for ( i = 0; i < count; i++ )
1179 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
1180 return -EFAULT;
1181 __gnttab_copy(&op);
1182 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
1183 return -EFAULT;
1185 return 0;
1188 long
1189 do_grant_table_op(
1190 unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
1192 long rc;
1193 struct domain *d = current->domain;
1195 if ( count > 512 )
1196 return -EINVAL;
1198 LOCK_BIGLOCK(d);
1200 rc = -EFAULT;
1201 switch ( cmd )
1203 case GNTTABOP_map_grant_ref:
1205 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
1206 guest_handle_cast(uop, gnttab_map_grant_ref_t);
1207 if ( unlikely(!guest_handle_okay(map, count)) )
1208 goto out;
1209 rc = -EPERM;
1210 if ( unlikely(!grant_operation_permitted(d)) )
1211 goto out;
1212 rc = gnttab_map_grant_ref(map, count);
1213 break;
1215 case GNTTABOP_unmap_grant_ref:
1217 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
1218 guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
1219 if ( unlikely(!guest_handle_okay(unmap, count)) )
1220 goto out;
1221 rc = -EPERM;
1222 if ( unlikely(!grant_operation_permitted(d)) )
1223 goto out;
1224 rc = gnttab_unmap_grant_ref(unmap, count);
1225 break;
1227 case GNTTABOP_setup_table:
1229 rc = gnttab_setup_table(
1230 guest_handle_cast(uop, gnttab_setup_table_t), count);
1231 break;
1233 case GNTTABOP_transfer:
1235 XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
1236 guest_handle_cast(uop, gnttab_transfer_t);
1237 if ( unlikely(!guest_handle_okay(transfer, count)) )
1238 goto out;
1239 rc = -EPERM;
1240 if ( unlikely(!grant_operation_permitted(d)) )
1241 goto out;
1242 rc = gnttab_transfer(transfer, count);
1243 break;
1245 case GNTTABOP_copy:
1247 XEN_GUEST_HANDLE(gnttab_copy_t) copy =
1248 guest_handle_cast(uop, gnttab_copy_t);
1249 if ( unlikely(!guest_handle_okay(copy, count)) )
1250 goto out;
1251 rc = gnttab_copy(copy, count);
1252 break;
1254 case GNTTABOP_query_size:
1256 rc = gnttab_query_size(
1257 guest_handle_cast(uop, gnttab_query_size_t), count);
1258 break;
1260 default:
1261 rc = -ENOSYS;
1262 break;
1265 out:
1266 UNLOCK_BIGLOCK(d);
1268 return rc;
1271 #ifdef CONFIG_COMPAT
1272 #include "compat/grant_table.c"
1273 #endif
1275 static unsigned int max_nr_active_grant_frames(void)
1277 return (((max_nr_grant_frames * (PAGE_SIZE / sizeof(grant_entry_t))) +
1278 ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
1279 / (PAGE_SIZE / sizeof(struct active_grant_entry)));
1282 int
1283 grant_table_create(
1284 struct domain *d)
1286 struct grant_table *t;
1287 int i;
1289 /* If this sizeof assertion fails, fix the function: shared_index */
1290 ASSERT(sizeof(grant_entry_t) == 8);
1292 if ( (t = xmalloc(struct grant_table)) == NULL )
1293 goto no_mem_0;
1295 /* Simple stuff. */
1296 memset(t, 0, sizeof(*t));
1297 spin_lock_init(&t->lock);
1298 t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
1300 /* Active grant table. */
1301 if ( (t->active = xmalloc_array(struct active_grant_entry *,
1302 max_nr_active_grant_frames())) == NULL )
1303 goto no_mem_1;
1304 memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
1305 for ( i = 0;
1306 i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
1308 if ( (t->active[i] = alloc_xenheap_page()) == NULL )
1309 goto no_mem_2;
1310 memset(t->active[i], 0, PAGE_SIZE);
1313 /* Tracking of mapped foreign frames table */
1314 if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
1315 max_nr_maptrack_frames())) == NULL )
1316 goto no_mem_2;
1317 memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
1318 if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
1319 goto no_mem_3;
1320 memset(t->maptrack[0], 0, PAGE_SIZE);
1321 t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
1322 for ( i = 0; i < t->maptrack_limit; i++ )
1323 t->maptrack[0][i].ref = i+1;
1325 /* Shared grant table. */
1326 if ( (t->shared = xmalloc_array(struct grant_entry *,
1327 max_nr_grant_frames)) == NULL )
1328 goto no_mem_3;
1329 memset(t->shared, 0, max_nr_grant_frames * sizeof(t->shared[0]));
1330 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1332 if ( (t->shared[i] = alloc_xenheap_page()) == NULL )
1333 goto no_mem_4;
1334 memset(t->shared[i], 0, PAGE_SIZE);
1337 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1338 gnttab_create_shared_page(d, t, i);
1340 /* Okay, install the structure. */
1341 d->grant_table = t;
1342 return 0;
1344 no_mem_4:
1345 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1346 free_xenheap_page(t->shared[i]);
1347 xfree(t->shared);
1348 no_mem_3:
1349 free_xenheap_page(t->maptrack[0]);
1350 xfree(t->maptrack);
1351 no_mem_2:
1352 for ( i = 0;
1353 i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
1354 free_xenheap_page(t->active[i]);
1355 xfree(t->active);
1356 no_mem_1:
1357 xfree(t);
1358 no_mem_0:
1359 return -ENOMEM;
1362 void
1363 gnttab_release_mappings(
1364 struct domain *d)
1366 struct grant_table *gt = d->grant_table;
1367 struct grant_mapping *map;
1368 grant_ref_t ref;
1369 grant_handle_t handle;
1370 struct domain *rd;
1371 struct active_grant_entry *act;
1372 struct grant_entry *sha;
1374 BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags));
1376 for ( handle = 0; handle < gt->maptrack_limit; handle++ )
1378 map = &maptrack_entry(gt, handle);
1379 if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
1380 continue;
1382 ref = map->ref;
1384 gdprintk(XENLOG_INFO, "Grant release (%hu) ref:(%hu) "
1385 "flags:(%x) dom:(%hu)\n",
1386 handle, ref, map->flags, map->domid);
1388 rd = rcu_lock_domain_by_id(map->domid);
1389 if ( rd == NULL )
1391 /* Nothing to clear up... */
1392 map->flags = 0;
1393 continue;
1396 spin_lock(&rd->grant_table->lock);
1398 act = &active_entry(rd->grant_table, ref);
1399 sha = &shared_entry(rd->grant_table, ref);
1401 if ( map->flags & GNTMAP_readonly )
1403 if ( map->flags & GNTMAP_device_map )
1405 BUG_ON(!(act->pin & GNTPIN_devr_mask));
1406 act->pin -= GNTPIN_devr_inc;
1407 put_page(mfn_to_page(act->frame));
1410 if ( map->flags & GNTMAP_host_map )
1412 BUG_ON(!(act->pin & GNTPIN_hstr_mask));
1413 act->pin -= GNTPIN_hstr_inc;
1414 /* Done implicitly when page tables are destroyed. */
1415 /* put_page(mfn_to_page(act->frame)); */
1418 else
1420 if ( map->flags & GNTMAP_device_map )
1422 BUG_ON(!(act->pin & GNTPIN_devw_mask));
1423 act->pin -= GNTPIN_devw_inc;
1424 put_page_and_type(mfn_to_page(act->frame));
1427 if ( map->flags & GNTMAP_host_map )
1429 BUG_ON(!(act->pin & GNTPIN_hstw_mask));
1430 act->pin -= GNTPIN_hstw_inc;
1431 /* Done implicitly when page tables are destroyed. */
1432 /* put_page_and_type(mfn_to_page(act->frame)); */
1435 if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
1436 gnttab_clear_flag(_GTF_writing, &sha->flags);
1439 if ( act->pin == 0 )
1440 gnttab_clear_flag(_GTF_reading, &sha->flags);
1442 spin_unlock(&rd->grant_table->lock);
1444 rcu_unlock_domain(rd);
1446 map->flags = 0;
1451 void
1452 grant_table_destroy(
1453 struct domain *d)
1455 struct grant_table *t = d->grant_table;
1456 int i;
1458 if ( t == NULL )
1459 return;
1461 for ( i = 0; i < nr_grant_frames(t); i++ )
1462 free_xenheap_page(t->shared[i]);
1463 xfree(t->shared);
1465 for ( i = 0; i < nr_maptrack_frames(t); i++ )
1466 free_xenheap_page(t->maptrack[i]);
1467 xfree(t->maptrack);
1469 for ( i = 0; i < nr_active_grant_frames(t); i++ )
1470 free_xenheap_page(t->active[i]);
1471 xfree(t->active);
1473 xfree(t);
1474 d->grant_table = NULL;
1477 /*
1478 * Local variables:
1479 * mode: C
1480 * c-set-style: "BSD"
1481 * c-basic-offset: 4
1482 * tab-width: 4
1483 * indent-tabs-mode: nil
1484 * End:
1485 */