ia64/xen-unstable

view xen/common/grant_table.c @ 15896:42d4313b5fdd

[IA64] update .hgignore for xenitp

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author Alex Williamson <alex.williamson@hp.com>
date Mon Sep 24 14:21:02 2007 -0600 (2007-09-24)
parents fa4d44c9d9f6
children 48d42d659a04
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005-2006 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/config.h>
28 #include <xen/iocap.h>
29 #include <xen/lib.h>
30 #include <xen/sched.h>
31 #include <xen/mm.h>
32 #include <xen/trace.h>
33 #include <xen/guest_access.h>
34 #include <xen/domain_page.h>
35 #include <xsm/xsm.h>
/*
 * Upper bound on the number of shared grant-table frames per domain.
 * An architecture may pre-define max_nr_grant_frames; otherwise it
 * defaults to DEFAULT_MAX_NR_GRANT_FRAMES and is tunable at boot via
 * the "gnttab_max_nr_frames" command-line parameter.
 */
#ifndef max_nr_grant_frames
unsigned int max_nr_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
#endif

/* The maximum number of grant mappings is defined as a multiplier of the
 * maximum number of grant table entries. This defines the multiplier used.
 * Pretty arbitrary. [POLICY]
 */
#define MAX_MAPTRACK_TO_GRANTS_RATIO 8
/*
 * The first two members of a grant entry are updated as a combined pair.
 * The following union allows that to happen in an endian-neutral fashion.
 * 'word' overlays both halves so they can be read/updated with a single
 * 32-bit cmpxchg (see the pinning loops below).
 */
union grant_combo {
    uint32_t word;
    struct {
        uint16_t flags;   /* GTF_* flags half of the shared entry */
        domid_t domid;    /* domid half, checked against the mapping domain */
    } shorts;
};
/* Used to share code between unmap_grant_ref and unmap_and_replace. */
struct gnttab_unmap_common {
    /* Input: host VA (or pte address) of the existing mapping. */
    uint64_t host_addr;
    /* Input: device bus address; zero for unmap_and_replace. */
    uint64_t dev_bus_addr;
    /* Input: replacement mapping address; zero for unmap_grant_ref. */
    uint64_t new_addr;
    /* Input: maptrack handle returned by the original map operation. */
    grant_handle_t handle;

    /* Output: GNTST_* result code. */
    int16_t status;
};
/*
 * Log a warning, store the given GNTST_* code into the local 'rc' and
 * jump to the named cleanup label.  Assumes 'rc' is in scope at the
 * call site.
 */
#define PIN_FAIL(_lbl, _rc, _f, _a...)          \
    do {                                        \
        gdprintk(XENLOG_WARNING, _f, ## _a );   \
        rc = (_rc);                             \
        goto _lbl;                              \
    } while ( 0 )

/* Number of maptrack entries that fit in one xenheap page. */
#define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
/* Two-level lookup of maptrack entry 'e' in table 't'. */
#define maptrack_entry(t, e) \
    ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
81 static inline unsigned int
82 nr_maptrack_frames(struct grant_table *t)
83 {
84 return t->maptrack_limit / MAPTRACK_PER_PAGE;
85 }
87 static unsigned inline int max_nr_maptrack_frames(void)
88 {
89 return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
90 }
/* Shared grant entries per page, and two-level lookup of entry 'e'. */
#define SHGNT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_t))
#define shared_entry(t, e) \
    ((t)->shared[(e)/SHGNT_PER_PAGE][(e)%SHGNT_PER_PAGE])
/* Active grant entries per page, and two-level lookup of entry 'e'. */
#define ACGNT_PER_PAGE (PAGE_SIZE / sizeof(struct active_grant_entry))
#define active_entry(t, e) \
    ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
100 static inline int
101 __get_maptrack_handle(
102 struct grant_table *t)
103 {
104 unsigned int h;
105 if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
106 return -1;
107 t->maptrack_head = maptrack_entry(t, h).ref;
108 t->map_count++;
109 return h;
110 }
112 static inline void
113 put_maptrack_handle(
114 struct grant_table *t, int handle)
115 {
116 maptrack_entry(t, handle).ref = t->maptrack_head;
117 t->maptrack_head = handle;
118 t->map_count--;
119 }
/*
 * Allocate a maptrack handle from local grant table 'lgt', expanding
 * the maptrack array by one page (up to max_nr_maptrack_frames()) when
 * the free list is exhausted.  Returns the handle, or -1 on failure
 * (limit reached or out of memory).
 */
static inline int
get_maptrack_handle(
    struct grant_table *lgt)
{
    int i;
    grant_handle_t handle;
    struct grant_mapping *new_mt;
    unsigned int new_mt_limit, nr_frames;

    /* Fast path is attempted outside the lock... */
    if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
    {
        spin_lock(&lgt->lock);

        /* ...so re-check under the lock before committing to expansion. */
        if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
        {
            nr_frames = nr_maptrack_frames(lgt);
            if ( nr_frames >= max_nr_maptrack_frames() )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            new_mt = alloc_xenheap_page();
            if ( new_mt == NULL )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            clear_page(new_mt);

            new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;

            /* Chain every new entry onto the free list (ref = next index). */
            for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
            {
                new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
                new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
            }

            /* Publish the new page before raising the limit. */
            lgt->maptrack[nr_frames] = new_mt;
            lgt->maptrack_limit = new_mt_limit;

            gdprintk(XENLOG_INFO,
                     "Increased maptrack size to %u frames.\n", nr_frames + 1);
            handle = __get_maptrack_handle(lgt);
        }

        spin_unlock(&lgt->lock);
    }
    return handle;
}
/*
 * Map the grant entry (op->dom, op->ref) for the calling domain.  The
 * result is reported in op->status (GNTST_*); on success op->handle and
 * op->dev_bus_addr are also filled in.
 *
 * op->host_addr is _either_ a host virtual address, or the address of
 * the pte to update, as indicated by the GNTMAP_contains_pte flag.
 */
static void
__gnttab_map_grant_ref(
    struct gnttab_map_grant_ref *op)
{
    struct domain *ld, *rd;            /* local (mapping) / remote (granting) */
    struct vcpu *led;
    int handle;
    unsigned long frame = 0;
    int rc = GNTST_okay;
    struct active_grant_entry *act;
    struct grant_mapping *mt;
    grant_entry_t *sha;
    union grant_combo scombo, prev_scombo, new_scombo;

    /*
     * We bound the number of times we retry CMPXCHG on memory locations that
     * we share with a guest OS. The reason is that the guest can modify that
     * location at a higher rate than we can read-modify-CMPXCHG, so the guest
     * could cause us to livelock. There are a few cases where it is valid for
     * the guest to race our updates (e.g., to change the GTF_readonly flag),
     * so we allow a few retries before failing.
     */
    int retries = 0;

    led = current;
    ld = led->domain;

    /* At least one of device_map / host_map must be requested. */
    if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
    {
        gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
        op->status = GNTST_bad_gntref;
        return;
    }

    if ( unlikely((rd = rcu_lock_domain_by_id(op->dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", op->dom);
        op->status = GNTST_bad_domain;
        return;
    }

    rc = xsm_grant_mapref(ld, rd, op->flags);
    if ( rc )
    {
        rcu_unlock_domain(rd);
        op->status = GNTST_permission_denied;
        return;
    }

    /* Reserve a local maptrack slot to record the mapping in. */
    if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
    {
        rcu_unlock_domain(rd);
        gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
        op->status = GNTST_no_device_space;
        return;
    }

    spin_lock(&rd->grant_table->lock);

    /* Bounds check on the grant ref */
    if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
        PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    /* If already pinned, check the active domid and avoid refcnt overflow. */
    if ( act->pin &&
         ((act->domid != ld->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, ld->domain_id, act->pin);

    /*
     * Update the shared entry's flags only when this is the first pin,
     * or when upgrading an entry not yet pinned for writing.
     */
    if ( !act->pin ||
         (!(op->flags & GNTMAP_readonly) &&
          !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /*
         * This loop attempts to set the access (reading/writing) flags
         * in the grant table entry.  It tries a cmpxchg on the field
         * up to five times, and then fails under the assumption that
         * the guest is misbehaving.
         */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != ld->domain_id)) )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                         scombo.shorts.flags, scombo.shorts.domid,
                         ld->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !(op->flags & GNTMAP_readonly) )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        /* First pin: latch the granting domid and translated frame. */
        if ( !act->pin )
        {
            act->domid = scombo.shorts.domid;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    if ( op->flags & GNTMAP_device_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    frame = act->frame;

    spin_unlock(&rd->grant_table->lock);

    /* Take page (and type, for writable maps) references on the frame. */
    if ( unlikely(!mfn_valid(frame)) ||
         unlikely(!((op->flags & GNTMAP_readonly) ?
                    get_page(mfn_to_page(frame), rd) :
                    get_page_and_type(mfn_to_page(frame), rd,
                                      PGT_writable_page))) )
    {
        if ( !rd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not pin grant frame %lx\n", frame);
        rc = GNTST_general_error;
        goto undo_out;
    }

    if ( op->flags & GNTMAP_host_map )
    {
        rc = create_grant_host_mapping(op->host_addr, frame, op->flags);
        if ( rc != GNTST_okay )
        {
            if ( !(op->flags & GNTMAP_readonly) )
                put_page_type(mfn_to_page(frame));
            put_page(mfn_to_page(frame));
            goto undo_out;
        }

        /* Combined host+device map holds an extra set of references. */
        if ( op->flags & GNTMAP_device_map )
        {
            (void)get_page(mfn_to_page(frame), rd);
            if ( !(op->flags & GNTMAP_readonly) )
                get_page_type(mfn_to_page(frame), PGT_writable_page);
        }
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);

    /* Record the mapping so it can be found again at unmap time. */
    mt = &maptrack_entry(ld->grant_table, handle);
    mt->domid = op->dom;
    mt->ref = op->ref;
    mt->flags = op->flags;

    op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
    op->handle = handle;
    op->status = GNTST_okay;

    rcu_unlock_domain(rd);
    return;

    /* Failure after pinning: roll the pin counts and flags back. */
 undo_out:
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    if ( op->flags & GNTMAP_device_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    if ( !(op->flags & GNTMAP_readonly) &&
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    op->status = rc;
    put_maptrack_handle(ld->grant_table, handle);
    rcu_unlock_domain(rd);
}
391 static long
392 gnttab_map_grant_ref(
393 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
394 {
395 int i;
396 struct gnttab_map_grant_ref op;
398 for ( i = 0; i < count; i++ )
399 {
400 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
401 return -EFAULT;
402 __gnttab_map_grant_ref(&op);
403 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
404 return -EFAULT;
405 }
407 return 0;
408 }
/*
 * Core of unmap_grant_ref / unmap_and_replace: look up the maptrack
 * entry for op->handle, tear down the device and/or host mapping it
 * records, drop the corresponding page references and pin counts, and
 * report the result in op->status.
 */
static void
__gnttab_unmap_common(
    struct gnttab_unmap_common *op)
{
    domid_t dom;
    grant_ref_t ref;
    struct domain *ld, *rd;            /* local (unmapping) / remote (granting) */
    struct active_grant_entry *act;
    grant_entry_t *sha;
    struct grant_mapping *map;
    u16 flags;
    s16 rc = 0;
    unsigned long frame;

    ld = current->domain;

    /* Zero dev_bus_addr means "no device-map teardown requested". */
    frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);

    if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
    {
        gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    map = &maptrack_entry(ld->grant_table, op->handle);

    /* Zero flags marks a free maptrack slot: stale or bogus handle. */
    if ( unlikely(!map->flags) )
    {
        gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    dom = map->domid;
    ref = map->ref;
    flags = map->flags;

    if ( unlikely((rd = rcu_lock_domain_by_id(dom)) == NULL) )
    {
        /* This can happen when a grant is implicitly unmapped. */
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
        domain_crash(ld); /* naughty... */
        return;
    }

    rc = xsm_grant_unmapref(ld, rd);
    if ( rc )
    {
        rcu_unlock_domain(rd);
        op->status = GNTST_permission_denied;
        return;
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);

    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, ref);
    sha = &shared_entry(rd->grant_table, ref);

    if ( frame == 0 )
    {
        frame = act->frame;
    }
    else
    {
        /* Caller supplied a frame: it must match the pinned one. */
        if ( unlikely(frame != act->frame) )
            PIN_FAIL(unmap_out, GNTST_general_error,
                     "Bad frame number doesn't match gntref.\n");
        if ( flags & GNTMAP_device_map )
        {
            ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
            map->flags &= ~GNTMAP_device_map;
            if ( flags & GNTMAP_readonly )
            {
                act->pin -= GNTPIN_devr_inc;
                put_page(mfn_to_page(frame));
            }
            else
            {
                act->pin -= GNTPIN_devw_inc;
                put_page_and_type(mfn_to_page(frame));
            }
        }
    }

    if ( (op->host_addr != 0) && (flags & GNTMAP_host_map) )
    {
        /* new_addr is zero for plain unmap; non-zero replaces the pte. */
        if ( (rc = replace_grant_host_mapping(op->host_addr,
                                              frame, op->new_addr, flags)) < 0 )
            goto unmap_out;

        ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
        map->flags &= ~GNTMAP_host_map;
        if ( flags & GNTMAP_readonly )
        {
            act->pin -= GNTPIN_hstr_inc;
            put_page(mfn_to_page(frame));
        }
        else
        {
            act->pin -= GNTPIN_hstw_inc;
            put_page_and_type(mfn_to_page(frame));
        }
    }

    /* Both halves gone: recycle the maptrack slot. */
    if ( (map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
    {
        map->flags = 0;
        put_maptrack_handle(ld->grant_table, op->handle);
    }

    /* If just unmapped a writable mapping, mark as dirtied */
    if ( !(flags & GNTMAP_readonly) )
        gnttab_mark_dirty(rd, frame);

    /* Clear shared-entry flags once the last matching pin is dropped. */
    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
         !(flags & GNTMAP_readonly) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( act->pin == 0 )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unmap_out:
    op->status = rc;
    spin_unlock(&rd->grant_table->lock);
    rcu_unlock_domain(rd);
}
540 static void
541 __gnttab_unmap_grant_ref(
542 struct gnttab_unmap_grant_ref *op)
543 {
544 struct gnttab_unmap_common common = {
545 .host_addr = op->host_addr,
546 .dev_bus_addr = op->dev_bus_addr,
547 .handle = op->handle,
548 };
550 __gnttab_unmap_common(&common);
551 op->status = common.status;
552 }
554 static long
555 gnttab_unmap_grant_ref(
556 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
557 {
558 int i;
559 struct gnttab_unmap_grant_ref op;
561 for ( i = 0; i < count; i++ )
562 {
563 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
564 goto fault;
565 __gnttab_unmap_grant_ref(&op);
566 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
567 goto fault;
568 }
570 flush_tlb_mask(current->domain->domain_dirty_cpumask);
571 return 0;
573 fault:
574 flush_tlb_mask(current->domain->domain_dirty_cpumask);
575 return -EFAULT;
576 }
578 static void
579 __gnttab_unmap_and_replace(
580 struct gnttab_unmap_and_replace *op)
581 {
582 struct gnttab_unmap_common common = {
583 .host_addr = op->host_addr,
584 .new_addr = op->new_addr,
585 .handle = op->handle,
586 };
588 __gnttab_unmap_common(&common);
589 op->status = common.status;
590 }
592 static long
593 gnttab_unmap_and_replace(
594 XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) uop, unsigned int count)
595 {
596 int i;
597 struct gnttab_unmap_and_replace op;
599 for ( i = 0; i < count; i++ )
600 {
601 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
602 goto fault;
603 __gnttab_unmap_and_replace(&op);
604 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
605 goto fault;
606 }
608 flush_tlb_mask(current->domain->domain_dirty_cpumask);
609 return 0;
611 fault:
612 flush_tlb_mask(current->domain->domain_dirty_cpumask);
613 return -EFAULT;
614 }
/*
 * Grow domain 'd's grant table to 'req_nr_frames' shared frames (plus
 * the matching number of active-entry frames).  Returns 1 on success,
 * 0 on allocation failure (in which case all frames allocated by this
 * call are released again).
 */
int
gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
{
    /* d's grant table lock must be held by the caller */

    struct grant_table *gt = d->grant_table;
    unsigned int i;

    ASSERT(req_nr_frames <= max_nr_grant_frames);

    gdprintk(XENLOG_INFO,
             "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
             d->domain_id, nr_grant_frames(gt), req_nr_frames);

    /* Active */
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
            goto active_alloc_failed;
        clear_page(gt->active[i]);
    }

    /* Shared */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        if ( (gt->shared[i] = alloc_xenheap_page()) == NULL )
            goto shared_alloc_failed;
        clear_page(gt->shared[i]);
    }

    /* Share the new shared frames with the recipient domain */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
        gnttab_create_shared_page(d, gt, i);

    /* Only raise the published size once everything is in place. */
    gt->nr_grant_frames = req_nr_frames;

    return 1;

    /*
     * NOTE(review): both rollback loops run over the full requested
     * range, so they also pass slots that were never allocated in this
     * call (including the slot whose allocation just failed) to
     * free_xenheap_page().  This assumes those slots are NULL and that
     * free_xenheap_page(NULL) is a no-op -- TODO confirm.
     */
 shared_alloc_failed:
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        free_xenheap_page(gt->shared[i]);
        gt->shared[i] = NULL;
    }
 active_alloc_failed:
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        free_xenheap_page(gt->active[i]);
        gt->active[i] = NULL;
    }
    gdprintk(XENLOG_INFO, "Allocation failure when expanding grant table.\n");
    return 0;
}
/*
 * GNTTABOP_setup_table: grow the target domain's grant table to
 * op.nr_frames frames (if needed) and return the list of frame numbers
 * backing it.  Only a privileged domain may set up another domain's
 * table.  Hypercall status is returned in op.status; -EFAULT only when
 * guest memory cannot be read/written.
 */
static long
gnttab_setup_table(
    XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
{
    struct gnttab_setup_table op;
    struct domain *d;
    int i;
    unsigned long gmfn;
    domid_t dom;

    /* This operation is not batchable. */
    if ( count != 1 )
        return -EINVAL;

    if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
    {
        gdprintk(XENLOG_INFO, "Fault while reading gnttab_setup_table_t.\n");
        return -EFAULT;
    }

    if ( unlikely(op.nr_frames > max_nr_grant_frames) )
    {
        gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
                " per domain.\n",
                max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto out;
    }

    dom = op.dom;
    if ( dom == DOMID_SELF )
    {
        dom = current->domain->domain_id;
    }
    else if ( unlikely(!IS_PRIV(current->domain)) )
    {
        op.status = GNTST_permission_denied;
        goto out;
    }

    if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
        op.status = GNTST_bad_domain;
        goto out;
    }

    if ( xsm_grant_setup(current->domain, d) )
    {
        rcu_unlock_domain(d);
        op.status = GNTST_permission_denied;
        goto out;
    }

    spin_lock(&d->grant_table->lock);

    if ( (op.nr_frames > nr_grant_frames(d->grant_table)) &&
         !gnttab_grow_table(d, op.nr_frames) )
    {
        gdprintk(XENLOG_INFO,
                 "Expand grant table to %d failed. Current: %d Max: %d.\n",
                 op.nr_frames,
                 nr_grant_frames(d->grant_table),
                 max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto setup_unlock_out;
    }

    op.status = GNTST_okay;
    for ( i = 0; i < op.nr_frames; i++ )
    {
        gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
        /* Best-effort copy-out of each frame number: faults are ignored. */
        (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
    }

 setup_unlock_out:
    spin_unlock(&d->grant_table->lock);

    rcu_unlock_domain(d);

 out:
    if ( unlikely(copy_to_guest(uop, &op, 1)) )
        return -EFAULT;

    return 0;
}
758 static long
759 gnttab_query_size(
760 XEN_GUEST_HANDLE(gnttab_query_size_t) uop, unsigned int count)
761 {
762 struct gnttab_query_size op;
763 struct domain *d;
764 domid_t dom;
765 int rc;
767 if ( count != 1 )
768 return -EINVAL;
770 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
771 {
772 gdprintk(XENLOG_INFO, "Fault while reading gnttab_query_size_t.\n");
773 return -EFAULT;
774 }
776 dom = op.dom;
777 if ( dom == DOMID_SELF )
778 {
779 dom = current->domain->domain_id;
780 }
781 else if ( unlikely(!IS_PRIV(current->domain)) )
782 {
783 op.status = GNTST_permission_denied;
784 goto query_out;
785 }
787 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
788 {
789 gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
790 op.status = GNTST_bad_domain;
791 goto query_out;
792 }
794 rc = xsm_grant_query_size(current->domain, d);
795 if ( rc )
796 {
797 rcu_unlock_domain(d);
798 op.status = GNTST_permission_denied;
799 goto query_out;
800 }
802 spin_lock(&d->grant_table->lock);
804 op.nr_frames = nr_grant_frames(d->grant_table);
805 op.max_nr_frames = max_nr_grant_frames;
806 op.status = GNTST_okay;
808 spin_unlock(&d->grant_table->lock);
810 rcu_unlock_domain(d);
812 query_out:
813 if ( unlikely(copy_to_guest(uop, &op, 1)) )
814 return -EFAULT;
816 return 0;
817 }
819 /*
820 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
821 * ownership of a page frame. If so, lock down the grant entry.
822 */
/*
 * Returns 1 and marks the entry GTF_transfer_committed on success;
 * returns 0 (with the entry untouched) on any failure.
 */
static int
gnttab_prepare_for_transfer(
    struct domain *rd, struct domain *ld, grant_ref_t ref)
{
    struct grant_table *rgt;
    struct grant_entry *sha;
    union grant_combo scombo, prev_scombo, new_scombo;
    int retries = 0;

    if ( unlikely((rgt = rd->grant_table) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Dom %d has no grant table.\n", rd->domain_id);
        return 0;
    }

    spin_lock(&rgt->lock);

    if ( unlikely(ref >= nr_grant_entries(rd->grant_table)) )
    {
        gdprintk(XENLOG_INFO,
                "Bad grant reference (%d) for transfer to domain(%d).\n",
                ref, rd->domain_id);
        goto fail;
    }

    sha = &shared_entry(rgt, ref);

    scombo.word = *(u32 *)&sha->flags;

    /*
     * Bounded cmpxchg retry loop: the entry must be exactly
     * GTF_accept_transfer and name 'ld' as the transferring domain.
     * A guest racing updates faster than we can read-modify-cmpxchg
     * is treated as misbehaving after five attempts.
     */
    for ( ; ; )
    {
        if ( unlikely(scombo.shorts.flags != GTF_accept_transfer) ||
             unlikely(scombo.shorts.domid != ld->domain_id) )
        {
            gdprintk(XENLOG_INFO, "Bad flags (%x) or dom (%d). "
                    "(NB. expected dom %d)\n",
                    scombo.shorts.flags, scombo.shorts.domid,
                    ld->domain_id);
            goto fail;
        }

        new_scombo = scombo;
        new_scombo.shorts.flags |= GTF_transfer_committed;

        prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                   scombo.word, new_scombo.word);
        if ( likely(prev_scombo.word == scombo.word) )
            break;

        if ( retries++ == 4 )
        {
            gdprintk(XENLOG_WARNING, "Shared grant entry is unstable.\n");
            goto fail;
        }

        scombo = prev_scombo;
    }

    spin_unlock(&rgt->lock);
    return 1;

 fail:
    spin_unlock(&rgt->lock);
    return 0;
}
889 static long
890 gnttab_transfer(
891 XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
892 {
893 struct domain *d = current->domain;
894 struct domain *e;
895 struct page_info *page;
896 int i;
897 grant_entry_t *sha;
898 struct gnttab_transfer gop;
899 unsigned long mfn;
901 for ( i = 0; i < count; i++ )
902 {
903 /* Read from caller address space. */
904 if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
905 {
906 gdprintk(XENLOG_INFO, "gnttab_transfer: error reading req %d/%d\n",
907 i, count);
908 return -EFAULT;
909 }
911 mfn = gmfn_to_mfn(d, gop.mfn);
913 /* Check the passed page frame for basic validity. */
914 if ( unlikely(!mfn_valid(mfn)) )
915 {
916 gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
917 (unsigned long)gop.mfn);
918 gop.status = GNTST_bad_page;
919 goto copyback;
920 }
922 page = mfn_to_page(mfn);
923 if ( unlikely(is_xen_heap_frame(page)) )
924 {
925 gdprintk(XENLOG_INFO, "gnttab_transfer: xen frame %lx\n",
926 (unsigned long)gop.mfn);
927 gop.status = GNTST_bad_page;
928 goto copyback;
929 }
931 if ( steal_page(d, page, 0) < 0 )
932 {
933 gop.status = GNTST_bad_page;
934 goto copyback;
935 }
937 /* Find the target domain. */
938 if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
939 {
940 gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
941 gop.domid);
942 page->count_info &= ~(PGC_count_mask|PGC_allocated);
943 free_domheap_page(page);
944 gop.status = GNTST_bad_domain;
945 goto copyback;
946 }
948 if ( xsm_grant_transfer(d, e) )
949 {
950 rcu_unlock_domain(e);
951 gop.status = GNTST_permission_denied;
952 goto copyback;
953 }
955 spin_lock(&e->page_alloc_lock);
957 /*
958 * Check that 'e' will accept the page and has reservation
959 * headroom. Also, a domain mustn't have PGC_allocated
960 * pages when it is dying.
961 */
962 if ( unlikely(e->is_dying) ||
963 unlikely(e->tot_pages >= e->max_pages) ||
964 unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
965 {
966 if ( !e->is_dying )
967 gdprintk(XENLOG_INFO, "gnttab_transfer: "
968 "Transferee has no reservation "
969 "headroom (%d,%d) or provided a bad grant ref (%08x) "
970 "or is dying (%d)\n",
971 e->tot_pages, e->max_pages, gop.ref, e->is_dying);
972 spin_unlock(&e->page_alloc_lock);
973 rcu_unlock_domain(e);
974 page->count_info &= ~(PGC_count_mask|PGC_allocated);
975 free_domheap_page(page);
976 gop.status = GNTST_general_error;
977 goto copyback;
978 }
980 /* Okay, add the page to 'e'. */
981 if ( unlikely(e->tot_pages++ == 0) )
982 get_knownalive_domain(e);
983 list_add_tail(&page->list, &e->page_list);
984 page_set_owner(page, e);
986 spin_unlock(&e->page_alloc_lock);
988 TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
990 /* Tell the guest about its new page frame. */
991 spin_lock(&e->grant_table->lock);
993 sha = &shared_entry(e->grant_table, gop.ref);
994 guest_physmap_add_page(e, sha->frame, mfn);
995 sha->frame = mfn;
996 wmb();
997 sha->flags |= GTF_transfer_completed;
999 spin_unlock(&e->grant_table->lock);
1001 rcu_unlock_domain(e);
1003 gop.status = GNTST_okay;
1005 copyback:
1006 if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
1008 gdprintk(XENLOG_INFO, "gnttab_transfer: error writing resp "
1009 "%d/%d\n", i, count);
1010 return -EFAULT;
1014 return 0;
1017 /* Undo __acquire_grant_for_copy. Again, this has no effect on page
1018 type and reference counts. */
1019 static void
1020 __release_grant_for_copy(
1021 struct domain *rd, unsigned long gref, int readonly)
1023 grant_entry_t *sha;
1024 struct active_grant_entry *act;
1025 unsigned long r_frame;
1027 spin_lock(&rd->grant_table->lock);
1029 act = &active_entry(rd->grant_table, gref);
1030 sha = &shared_entry(rd->grant_table, gref);
1031 r_frame = act->frame;
1033 if ( readonly )
1035 act->pin -= GNTPIN_hstr_inc;
1037 else
1039 gnttab_mark_dirty(rd, r_frame);
1041 act->pin -= GNTPIN_hstw_inc;
1042 if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) )
1043 gnttab_clear_flag(_GTF_writing, &sha->flags);
1046 if ( !act->pin )
1047 gnttab_clear_flag(_GTF_reading, &sha->flags);
1049 spin_unlock(&rd->grant_table->lock);
1052 /* Grab a frame number from a grant entry and update the flags and pin
1053 count as appropriate. Note that this does *not* update the page
1054 type or reference counts, and does not check that the mfn is
1055 actually valid. */
1056 static int
1057 __acquire_grant_for_copy(
1058 struct domain *rd, unsigned long gref, int readonly,
1059 unsigned long *frame)
1061 grant_entry_t *sha;
1062 struct active_grant_entry *act;
1063 s16 rc = GNTST_okay;
1064 int retries = 0;
1065 union grant_combo scombo, prev_scombo, new_scombo;
1067 spin_lock(&rd->grant_table->lock);
1069 if ( unlikely(gref >= nr_grant_entries(rd->grant_table)) )
1070 PIN_FAIL(unlock_out, GNTST_bad_gntref,
1071 "Bad grant reference %ld\n", gref);
1073 act = &active_entry(rd->grant_table, gref);
1074 sha = &shared_entry(rd->grant_table, gref);
1076 /* If already pinned, check the active domid and avoid refcnt overflow. */
1077 if ( act->pin &&
1078 ((act->domid != current->domain->domain_id) ||
1079 (act->pin & 0x80808080U) != 0) )
1080 PIN_FAIL(unlock_out, GNTST_general_error,
1081 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
1082 act->domid, current->domain->domain_id, act->pin);
1084 if ( !act->pin ||
1085 (!readonly && !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask))) )
1087 scombo.word = *(u32 *)&sha->flags;
1089 for ( ; ; )
1091 /* If not already pinned, check the grant domid and type. */
1092 if ( !act->pin &&
1093 (((scombo.shorts.flags & GTF_type_mask) !=
1094 GTF_permit_access) ||
1095 (scombo.shorts.domid != current->domain->domain_id)) )
1096 PIN_FAIL(unlock_out, GNTST_general_error,
1097 "Bad flags (%x) or dom (%d). (expected dom %d)\n",
1098 scombo.shorts.flags, scombo.shorts.domid,
1099 current->domain->domain_id);
1101 new_scombo = scombo;
1102 new_scombo.shorts.flags |= GTF_reading;
1104 if ( !readonly )
1106 new_scombo.shorts.flags |= GTF_writing;
1107 if ( unlikely(scombo.shorts.flags & GTF_readonly) )
1108 PIN_FAIL(unlock_out, GNTST_general_error,
1109 "Attempt to write-pin a r/o grant entry.\n");
1112 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
1113 scombo.word, new_scombo.word);
1114 if ( likely(prev_scombo.word == scombo.word) )
1115 break;
1117 if ( retries++ == 4 )
1118 PIN_FAIL(unlock_out, GNTST_general_error,
1119 "Shared grant entry is unstable.\n");
1121 scombo = prev_scombo;
1124 if ( !act->pin )
1126 act->domid = scombo.shorts.domid;
1127 act->frame = gmfn_to_mfn(rd, sha->frame);
1131 act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;
1133 *frame = act->frame;
1135 unlock_out:
1136 spin_unlock(&rd->grant_table->lock);
1137 return rc;
/*
 * Implement a single GNTTABOP_copy operation: copy op->len bytes from a
 * source frame to a destination frame.  Each side is named either by a
 * grant reference (GNTCOPY_*_gref flag set) or, for DOMID_SELF only, by a
 * guest machine frame number.  The result is written to op->status; all
 * references taken along the way are dropped via the error_out unwind,
 * guarded by the have_* flags.
 */
static void
__gnttab_copy(
    struct gnttab_copy *op)
{
    struct domain *sd = NULL, *dd = NULL;   /* source / dest domains (RCU-locked) */
    unsigned long s_frame, d_frame;         /* machine frames to copy between */
    char *sp, *dp;                          /* temporary mappings of the frames */
    s16 rc = GNTST_okay;
    int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
    int src_is_gref, dest_is_gref;

    /* Both transfer regions must lie entirely within a single page. */
    if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
         ((op->dest.offset + op->len) > PAGE_SIZE) )
        PIN_FAIL(error_out, GNTST_bad_copy_arg, "copy beyond page area.\n");

    src_is_gref = op->flags & GNTCOPY_source_gref;
    dest_is_gref = op->flags & GNTCOPY_dest_gref;

    /* Raw-mfn addressing is only permitted for the caller's own domain. */
    if ( (op->source.domid != DOMID_SELF && !src_is_gref ) ||
         (op->dest.domid != DOMID_SELF && !dest_is_gref) )
        PIN_FAIL(error_out, GNTST_permission_denied,
                 "only allow copy-by-mfn for DOMID_SELF.\n");

    /* Take an RCU reference on the source domain. */
    if ( op->source.domid == DOMID_SELF )
        sd = rcu_lock_current_domain();
    else if ( (sd = rcu_lock_domain_by_id(op->source.domid)) == NULL )
        PIN_FAIL(error_out, GNTST_bad_domain,
                 "couldn't find %d\n", op->source.domid);

    /* Likewise for the destination domain. */
    if ( op->dest.domid == DOMID_SELF )
        dd = rcu_lock_current_domain();
    else if ( (dd = rcu_lock_domain_by_id(op->dest.domid)) == NULL )
        PIN_FAIL(error_out, GNTST_bad_domain,
                 "couldn't find %d\n", op->dest.domid);

    /* Security-module hook: any denial is reported as permission_denied. */
    rc = xsm_grant_copy(sd, dd);
    if ( rc )
    {
        rc = GNTST_permission_denied;
        goto error_out;
    }

    /* Resolve the source frame: via grant (read-only pin) or direct gmfn. */
    if ( src_is_gref )
    {
        rc = __acquire_grant_for_copy(sd, op->source.u.ref, 1, &s_frame);
        if ( rc != GNTST_okay )
            goto error_out;
        have_s_grant = 1;
    }
    else
    {
        s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
    }
    if ( unlikely(!mfn_valid(s_frame)) )
        PIN_FAIL(error_out, GNTST_general_error,
                 "source frame %lx invalid.\n", s_frame);
    /* Take a general page reference so the frame cannot disappear. */
    if ( !get_page(mfn_to_page(s_frame), sd) )
    {
        /* Only warn if the owner isn't already being torn down. */
        if ( !sd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
        rc = GNTST_general_error;
        goto error_out;
    }
    have_s_ref = 1;

    /* Resolve the destination frame: via grant (writable) or direct gmfn. */
    if ( dest_is_gref )
    {
        rc = __acquire_grant_for_copy(dd, op->dest.u.ref, 0, &d_frame);
        if ( rc != GNTST_okay )
            goto error_out;
        have_d_grant = 1;
    }
    else
    {
        d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
    }
    if ( unlikely(!mfn_valid(d_frame)) )
        PIN_FAIL(error_out, GNTST_general_error,
                 "destination frame %lx invalid.\n", d_frame);
    /* Writable destination: take both a reference and PGT_writable_page. */
    if ( !get_page_and_type(mfn_to_page(d_frame), dd, PGT_writable_page) )
    {
        if ( !dd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
        rc = GNTST_general_error;
        goto error_out;
    }

    /* Map both frames transiently and perform the actual byte copy. */
    sp = map_domain_page(s_frame);
    dp = map_domain_page(d_frame);

    memcpy(dp + op->dest.offset, sp + op->source.offset, op->len);

    unmap_domain_page(dp);
    unmap_domain_page(sp);

    /* The destination page was modified; record it for log-dirty tracking. */
    gnttab_mark_dirty(dd, d_frame);

    /* Drop the writable ref+type taken just above (success path only). */
    put_page_and_type(mfn_to_page(d_frame));
 error_out:
    /* Unwind, in reverse order, whatever was acquired before a failure. */
    if ( have_s_ref )
        put_page(mfn_to_page(s_frame));
    if ( have_s_grant )
        __release_grant_for_copy(sd, op->source.u.ref, 1);
    if ( have_d_grant )
        __release_grant_for_copy(dd, op->dest.u.ref, 0);
    if ( sd )
        rcu_unlock_domain(sd);
    if ( dd )
        rcu_unlock_domain(dd);
    op->status = rc;
}
1252 static long
1253 gnttab_copy(
1254 XEN_GUEST_HANDLE(gnttab_copy_t) uop, unsigned int count)
1256 int i;
1257 struct gnttab_copy op;
1259 for ( i = 0; i < count; i++ )
1261 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
1262 return -EFAULT;
1263 __gnttab_copy(&op);
1264 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
1265 return -EFAULT;
1267 return 0;
/*
 * Grant-table hypercall entry point.  Dispatches 'cmd' to the matching
 * gnttab_* batch handler, with 'uop' interpreted as an array of 'count'
 * command-specific structures.  Runs under the per-domain big lock.
 *
 * Returns 0 or a handler-specific value on success, -EINVAL for an
 * oversized batch, -EFAULT for an unusable guest handle, -EPERM if the
 * domain lacks grant privileges, -ENOSYS for unknown/unsupported commands.
 */
long
do_grant_table_op(
    unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
{
    long rc;
    struct domain *d = current->domain;

    /* Cap batch size so a single hypercall cannot run unboundedly long. */
    if ( count > 512 )
        return -EINVAL;

    LOCK_BIGLOCK(d);

    /* Default error: handle validation below jumps to 'out' leaving this. */
    rc = -EFAULT;
    switch ( cmd )
    {
    case GNTTABOP_map_grant_ref:
    {
        XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
            guest_handle_cast(uop, gnttab_map_grant_ref_t);
        if ( unlikely(!guest_handle_okay(map, count)) )
            goto out;
        rc = -EPERM;
        if ( unlikely(!grant_operation_permitted(d)) )
            goto out;
        rc = gnttab_map_grant_ref(map, count);
        break;
    }
    case GNTTABOP_unmap_grant_ref:
    {
        XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
            guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
        if ( unlikely(!guest_handle_okay(unmap, count)) )
            goto out;
        rc = -EPERM;
        if ( unlikely(!grant_operation_permitted(d)) )
            goto out;
        rc = gnttab_unmap_grant_ref(unmap, count);
        break;
    }
    case GNTTABOP_unmap_and_replace:
    {
        XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) unmap =
            guest_handle_cast(uop, gnttab_unmap_and_replace_t);
        if ( unlikely(!guest_handle_okay(unmap, count)) )
            goto out;
        rc = -EPERM;
        if ( unlikely(!grant_operation_permitted(d)) )
            goto out;
        /* Arch-dependent feature; report -ENOSYS where unimplemented. */
        rc = -ENOSYS;
        if ( unlikely(!replace_grant_supported()) )
            goto out;
        rc = gnttab_unmap_and_replace(unmap, count);
        break;
    }
    case GNTTABOP_setup_table:
    {
        rc = gnttab_setup_table(
            guest_handle_cast(uop, gnttab_setup_table_t), count);
        break;
    }
    case GNTTABOP_transfer:
    {
        XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
            guest_handle_cast(uop, gnttab_transfer_t);
        if ( unlikely(!guest_handle_okay(transfer, count)) )
            goto out;
        rc = -EPERM;
        if ( unlikely(!grant_operation_permitted(d)) )
            goto out;
        rc = gnttab_transfer(transfer, count);
        break;
    }
    case GNTTABOP_copy:
    {
        /*
         * Note: no grant_operation_permitted() check here — unlike
         * map/unmap/transfer, this case only validates the handle.
         */
        XEN_GUEST_HANDLE(gnttab_copy_t) copy =
            guest_handle_cast(uop, gnttab_copy_t);
        if ( unlikely(!guest_handle_okay(copy, count)) )
            goto out;
        rc = gnttab_copy(copy, count);
        break;
    }
    case GNTTABOP_query_size:
    {
        rc = gnttab_query_size(
            guest_handle_cast(uop, gnttab_query_size_t), count);
        break;
    }
    default:
        rc = -ENOSYS;
        break;
    }

  out:
    UNLOCK_BIGLOCK(d);

    return rc;
}
1368 #ifdef CONFIG_COMPAT
1369 #include "compat/grant_table.c"
1370 #endif
1372 static unsigned int max_nr_active_grant_frames(void)
1374 return (((max_nr_grant_frames * (PAGE_SIZE / sizeof(grant_entry_t))) +
1375 ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
1376 / (PAGE_SIZE / sizeof(struct active_grant_entry)));
/*
 * Allocate and initialise domain d's grant table: the top-level struct,
 * the active-entry frame array (initial frames populated), the maptrack
 * array (one initial frame, entries chained into a free list via .ref),
 * and the shared-entry frame array (initial frames populated and exposed
 * to the guest).  Returns 0 on success, -ENOMEM on any allocation failure
 * (all partial allocations are unwound via the no_mem_* labels).
 */
int
grant_table_create(
    struct domain *d)
{
    struct grant_table *t;
    int i;

    /* If this sizeof assertion fails, fix the function: shared_index */
    ASSERT(sizeof(grant_entry_t) == 8);

    if ( (t = xmalloc(struct grant_table)) == NULL )
        goto no_mem_0;

    /* Simple stuff. */
    memset(t, 0, sizeof(*t));
    spin_lock_init(&t->lock);
    t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;

    /* Active grant table. */
    if ( (t->active = xmalloc_array(struct active_grant_entry *,
                                    max_nr_active_grant_frames())) == NULL )
        goto no_mem_1;
    /* Zero the pointer array so the unwind paths see NULL for unallocated. */
    memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
    for ( i = 0;
          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
    {
        if ( (t->active[i] = alloc_xenheap_page()) == NULL )
            goto no_mem_2;
        clear_page(t->active[i]);
    }

    /* Tracking of mapped foreign frames table */
    if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
                                      max_nr_maptrack_frames())) == NULL )
        goto no_mem_2;
    memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
    if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
        goto no_mem_3;
    clear_page(t->maptrack[0]);
    t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
    /* Thread all maptrack entries into a free list via their .ref field. */
    for ( i = 0; i < t->maptrack_limit; i++ )
        t->maptrack[0][i].ref = i+1;

    /* Shared grant table. */
    if ( (t->shared = xmalloc_array(struct grant_entry *,
                                    max_nr_grant_frames)) == NULL )
        goto no_mem_3;
    memset(t->shared, 0, max_nr_grant_frames * sizeof(t->shared[0]));
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
    {
        if ( (t->shared[i] = alloc_xenheap_page()) == NULL )
            goto no_mem_4;
        clear_page(t->shared[i]);
    }

    /* Make the shared frames guest-visible. */
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
        gnttab_create_shared_page(d, t, i);

    /* Okay, install the structure. */
    d->grant_table = t;
    return 0;

    /*
     * Error unwind.  NOTE(review): the no_mem_4 and no_mem_2 loops iterate
     * over the full initial range even when allocation failed part-way, so
     * they pass NULL for the unallocated slots — this relies on
     * free_xenheap_page() being a no-op for NULL; confirm against the
     * allocator's definition.
     */
 no_mem_4:
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
        free_xenheap_page(t->shared[i]);
    xfree(t->shared);
 no_mem_3:
    free_xenheap_page(t->maptrack[0]);
    xfree(t->maptrack);
 no_mem_2:
    for ( i = 0;
          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
        free_xenheap_page(t->active[i]);
    xfree(t->active);
 no_mem_1:
    xfree(t);
 no_mem_0:
    return -ENOMEM;
}
/*
 * Called while domain d is dying: walk d's maptrack table and tear down
 * every grant mapping it still holds on other domains' pages.  For each
 * live entry the corresponding pin counts in the granting domain's active
 * entry are decremented, page references/types are released, and the
 * _GTF_writing/_GTF_reading flags in the shared entry are cleared once no
 * pins of the relevant kind remain.
 */
void
gnttab_release_mappings(
    struct domain *d)
{
    struct grant_table *gt = d->grant_table;
    struct grant_mapping *map;
    grant_ref_t ref;
    grant_handle_t handle;
    struct domain *rd;               /* remote (granting) domain */
    struct active_grant_entry *act;
    struct grant_entry *sha;

    /* Only legitimate during domain teardown. */
    BUG_ON(!d->is_dying);

    for ( handle = 0; handle < gt->maptrack_limit; handle++ )
    {
        map = &maptrack_entry(gt, handle);
        /* Skip entries with no live device or host mapping. */
        if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
            continue;

        ref = map->ref;

        gdprintk(XENLOG_INFO, "Grant release (%hu) ref:(%hu) "
                 "flags:(%x) dom:(%hu)\n",
                 handle, ref, map->flags, map->domid);

        rd = rcu_lock_domain_by_id(map->domid);
        if ( rd == NULL )
        {
            /* Nothing to clear up... */
            map->flags = 0;
            continue;
        }

        /* All pin/flag updates happen under the granter's table lock. */
        spin_lock(&rd->grant_table->lock);

        act = &active_entry(rd->grant_table, ref);
        sha = &shared_entry(rd->grant_table, ref);

        if ( map->flags & GNTMAP_readonly )
        {
            /* Read-only pins: drop plain page references. */
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devr_mask));
                act->pin -= GNTPIN_devr_inc;
                put_page(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstr_mask));
                act->pin -= GNTPIN_hstr_inc;
                gnttab_release_put_page(mfn_to_page(act->frame));
            }
        }
        else
        {
            /* Writable pins: the type reference must be dropped too. */
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devw_mask));
                act->pin -= GNTPIN_devw_inc;
                put_page_and_type(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstw_mask));
                act->pin -= GNTPIN_hstw_inc;
                gnttab_release_put_page_and_type(mfn_to_page(act->frame));
            }
        }

        /* Last writable pin gone: tell the granter writing has stopped. */
        if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
            gnttab_clear_flag(_GTF_writing, &sha->flags);

        /* Last pin of any kind gone: clear the reading flag as well. */
        if ( act->pin == 0 )
            gnttab_clear_flag(_GTF_reading, &sha->flags);

        spin_unlock(&rd->grant_table->lock);

        rcu_unlock_domain(rd);

        map->flags = 0;
    }
}
1546 void
1547 grant_table_destroy(
1548 struct domain *d)
1550 struct grant_table *t = d->grant_table;
1551 int i;
1553 if ( t == NULL )
1554 return;
1556 for ( i = 0; i < nr_grant_frames(t); i++ )
1557 free_xenheap_page(t->shared[i]);
1558 xfree(t->shared);
1560 for ( i = 0; i < nr_maptrack_frames(t); i++ )
1561 free_xenheap_page(t->maptrack[i]);
1562 xfree(t->maptrack);
1564 for ( i = 0; i < nr_active_grant_frames(t); i++ )
1565 free_xenheap_page(t->active[i]);
1566 xfree(t->active);
1568 xfree(t);
1569 d->grant_table = NULL;
1572 /*
1573 * Local variables:
1574 * mode: C
1575 * c-set-style: "BSD"
1576 * c-basic-offset: 4
1577 * tab-width: 4
1578 * indent-tabs-mode: nil
1579 * End:
1580 */