ia64/xen-unstable

view xen/common/grant_table.c @ 16407:2e5d922b7ee3

xen: Allow granting of foreign access to iomem pages, and with
arbitrary cache attributes.
Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Nov 20 17:26:48 2007 +0000 (2007-11-20)
parents 37be0bb60518
children cd5e1e76d0bc
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005-2006 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/config.h>
28 #include <xen/iocap.h>
29 #include <xen/lib.h>
30 #include <xen/sched.h>
31 #include <xen/mm.h>
32 #include <xen/trace.h>
33 #include <xen/guest_access.h>
34 #include <xen/domain_page.h>
35 #include <xsm/xsm.h>
37 #ifndef max_nr_grant_frames
38 unsigned int max_nr_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
39 integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
40 #endif
42 /* The maximum number of grant mappings is defined as a multiplier of the
43 * maximum number of grant table entries. This defines the multiplier used.
44 * Pretty arbitrary. [POLICY]
45 */
46 #define MAX_MAPTRACK_TO_GRANTS_RATIO 8
48 /*
49 * The first two members of a grant entry are updated as a combined pair.
50 * The following union allows that to happen in an endian-neutral fashion.
51 */
union grant_combo {
    uint32_t word;       /* Both halves together, for atomic cmpxchg updates. */
    struct {
        uint16_t flags;  /* GTF_* flags half of the grant entry. */
        domid_t domid;   /* Domain-id half of the grant entry. */
    } shorts;
};
60 /* Used to share code between unmap_grant_ref and unmap_and_replace. */
struct gnttab_unmap_common {
    /* Input */
    uint64_t host_addr;        /* Host virtual address (or pte address). */
    uint64_t dev_bus_addr;     /* Device bus address; encodes the frame. */
    uint64_t new_addr;         /* Replacement address (unmap_and_replace only). */
    grant_handle_t handle;     /* Maptrack handle returned at map time. */

    /* Return */
    int16_t status;            /* GNTST_* result code. */

    /* Shared state between *_unmap and *_unmap_complete */
    u16 flags;                 /* Copy of the maptrack entry's GNTMAP_* flags. */
    unsigned long frame;       /* Machine frame that was mapped. */
    struct grant_mapping *map; /* Local maptrack entry for this handle. */
    struct domain *rd;         /* Granting domain; NULL if *_unmap failed early. */
};
78 /* Number of unmap operations that are done between each tlb flush */
79 #define GNTTAB_UNMAP_BATCH_SIZE 32
82 #define PIN_FAIL(_lbl, _rc, _f, _a...) \
83 do { \
84 gdprintk(XENLOG_WARNING, _f, ## _a ); \
85 rc = (_rc); \
86 goto _lbl; \
87 } while ( 0 )
89 #define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
90 #define maptrack_entry(t, e) \
91 ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
/* Number of frames currently backing grant table @t's maptrack table. */
static inline unsigned int
nr_maptrack_frames(struct grant_table *t)
{
    return t->maptrack_limit / MAPTRACK_PER_PAGE;
}
99 static unsigned inline int max_nr_maptrack_frames(void)
100 {
101 return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
102 }
105 #define SHGNT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_t))
106 #define shared_entry(t, e) \
107 ((t)->shared[(e)/SHGNT_PER_PAGE][(e)%SHGNT_PER_PAGE])
108 #define ACGNT_PER_PAGE (PAGE_SIZE / sizeof(struct active_grant_entry))
109 #define active_entry(t, e) \
110 ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
/*
 * Pop a free maptrack handle off @t's free list, or return -1 if none
 * is available.  Free entries are chained through their 'ref' fields,
 * starting at t->maptrack_head.
 */
static inline int
__get_maptrack_handle(
    struct grant_table *t)
{
    unsigned int h;
    /* The final entry acts as an end-of-list sentinel: hitting it means
     * the free list is exhausted. */
    if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
        return -1;
    t->maptrack_head = maptrack_entry(t, h).ref;
    t->map_count++;
    return h;
}
124 static inline void
125 put_maptrack_handle(
126 struct grant_table *t, int handle)
127 {
128 maptrack_entry(t, handle).ref = t->maptrack_head;
129 t->maptrack_head = handle;
130 t->map_count--;
131 }
/*
 * Obtain a free maptrack handle for local grant table @lgt, growing the
 * maptrack table by one page if the free list is empty.  Returns the
 * handle, or -1 if the table is at its maximum size or the allocation
 * fails.  The fast path runs lock-free; growth retries under lgt->lock.
 */
static inline int
get_maptrack_handle(
    struct grant_table *lgt)
{
    int i;
    grant_handle_t handle;
    struct grant_mapping *new_mt;
    unsigned int new_mt_limit, nr_frames;

    if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
    {
        spin_lock(&lgt->lock);

        /* Re-check under the lock: another vcpu may have grown the table. */
        if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
        {
            nr_frames = nr_maptrack_frames(lgt);
            if ( nr_frames >= max_nr_maptrack_frames() )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            new_mt = alloc_xenheap_page();
            if ( new_mt == NULL )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            clear_page(new_mt);

            new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;

            /* Thread the new entries onto the free list (each points at
             * its successor; the last one becomes the sentinel). */
            for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
            {
                new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
                new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
            }

            lgt->maptrack[nr_frames] = new_mt;
            lgt->maptrack_limit = new_mt_limit;

            gdprintk(XENLOG_INFO,
                     "Increased maptrack size to %u frames.\n", nr_frames + 1);
            handle = __get_maptrack_handle(lgt);
        }

        spin_unlock(&lgt->lock);
    }
    return handle;
}
185 /*
186 * Returns 0 if TLB flush / invalidate required by caller.
187 * va will indicate the address to be invalidated.
188 *
189 * addr is _either_ a host virtual address, or the address of the pte to
190 * update, as indicated by the GNTMAP_contains_pte flag.
191 */
/*
 * Map grant entry (op->dom, op->ref) into the calling domain as
 * requested by op->flags (device and/or host mapping, optionally
 * read-only).  On success, fills in op->handle and op->dev_bus_addr and
 * sets op->status = GNTST_okay; on failure, op->status holds a GNTST_*
 * error and all partially-established state is rolled back.
 */
static void
__gnttab_map_grant_ref(
    struct gnttab_map_grant_ref *op)
{
    struct domain *ld, *rd;
    struct vcpu *led;
    int handle;
    unsigned long frame = 0;
    int rc = GNTST_okay;
    unsigned int cache_flags;
    struct active_grant_entry *act;
    struct grant_mapping *mt;
    grant_entry_t *sha;
    union grant_combo scombo, prev_scombo, new_scombo;

    /*
     * We bound the number of times we retry CMPXCHG on memory locations that
     * we share with a guest OS. The reason is that the guest can modify that
     * location at a higher rate than we can read-modify-CMPXCHG, so the guest
     * could cause us to livelock. There are a few cases where it is valid for
     * the guest to race our updates (e.g., to change the GTF_readonly flag),
     * so we allow a few retries before failing.
     */
    int retries = 0;

    led = current;
    ld = led->domain;

    /* At least one of device/host mapping must be requested. */
    if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
    {
        gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
        op->status = GNTST_bad_gntref;
        return;
    }

    if ( unlikely((rd = rcu_lock_domain_by_id(op->dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", op->dom);
        op->status = GNTST_bad_domain;
        return;
    }

    rc = xsm_grant_mapref(ld, rd, op->flags);
    if ( rc )
    {
        rcu_unlock_domain(rd);
        op->status = GNTST_permission_denied;
        return;
    }

    /* Reserve a local maptrack slot before taking rd's grant lock. */
    if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
    {
        rcu_unlock_domain(rd);
        gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
        op->status = GNTST_no_device_space;
        return;
    }

    spin_lock(&rd->grant_table->lock);

    /* Bounds check on the grant ref */
    if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
        PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    /* If already pinned, check the active domid and avoid refcnt overflow. */
    if ( act->pin &&
         ((act->domid != ld->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, ld->domain_id, act->pin);

    /* Only touch the shared flags when establishing the first pin, or
     * when upgrading an existing read-only pin to writable. */
    if ( !act->pin ||
         (!(op->flags & GNTMAP_readonly) &&
          !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /*
         * This loop attempts to set the access (reading/writing) flags
         * in the grant table entry.  It tries a cmpxchg on the field
         * up to five times, and then fails under the assumption that
         * the guest is misbehaving.
         */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != ld->domain_id)) )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                         scombo.shorts.flags, scombo.shorts.domid,
                         ld->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !(op->flags & GNTMAP_readonly) )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        /* First pin: latch the granting domid and the machine frame. */
        if ( !act->pin )
        {
            act->domid = scombo.shorts.domid;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    if ( op->flags & GNTMAP_device_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    frame = act->frame;

    /* Grantor-requested cache attributes, honoured only for iomem pages. */
    cache_flags = (sha->flags & (GTF_PAT | GTF_PWT | GTF_PCD) );

    spin_unlock(&rd->grant_table->lock);

    if ( is_iomem_page(frame) )
    {
        /* Iomem pages: permission-checked, mapped without page refcounts. */
        if ( !iomem_access_permitted(rd, frame, frame) )
        {
            gdprintk(XENLOG_WARNING,
                     "Iomem mapping not permitted %lx (domain %d)\n",
                     frame, rd->domain_id);
            rc = GNTST_general_error;
            goto undo_out;
        }

        rc = create_grant_host_mapping(
            op->host_addr, frame, op->flags, cache_flags);
        if ( rc != GNTST_okay )
            goto undo_out;
    }
    else
    {
        /* Normal RAM: take a general (and, for writable maps, a type)
         * reference on the frame on rd's behalf. */
        if ( unlikely(!mfn_valid(frame)) ||
             unlikely(!((op->flags & GNTMAP_readonly) ?
                        get_page(mfn_to_page(frame), rd) :
                        get_page_and_type(mfn_to_page(frame), rd,
                                          PGT_writable_page))) )
        {
            if ( !rd->is_dying )
                gdprintk(XENLOG_WARNING, "Could not pin grant frame %lx\n",
                         frame);
            rc = GNTST_general_error;
            goto undo_out;
        }

        if ( op->flags & GNTMAP_host_map )
        {
            rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0);
            if ( rc != GNTST_okay )
            {
                if ( !(op->flags & GNTMAP_readonly) )
                    put_page_type(mfn_to_page(frame));
                put_page(mfn_to_page(frame));
                goto undo_out;
            }

            /* A combined device map takes its own set of references. */
            if ( op->flags & GNTMAP_device_map )
            {
                (void)get_page(mfn_to_page(frame), rd);
                if ( !(op->flags & GNTMAP_readonly) )
                    get_page_type(mfn_to_page(frame), PGT_writable_page);
            }
        }
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);

    /* Record the mapping so it can be found again at unmap time. */
    mt = &maptrack_entry(ld->grant_table, handle);
    mt->domid = op->dom;
    mt->ref = op->ref;
    mt->flags = op->flags;

    op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
    op->handle = handle;
    op->status = GNTST_okay;

    rcu_unlock_domain(rd);
    return;

 undo_out:
    /* Roll back the pin counts and shared flags set above. */
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    if ( op->flags & GNTMAP_device_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    if ( !(op->flags & GNTMAP_readonly) &&
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    op->status = rc;
    put_maptrack_handle(ld->grant_table, handle);
    rcu_unlock_domain(rd);
}
426 static long
427 gnttab_map_grant_ref(
428 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
429 {
430 int i;
431 struct gnttab_map_grant_ref op;
433 for ( i = 0; i < count; i++ )
434 {
435 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
436 return -EFAULT;
437 __gnttab_map_grant_ref(&op);
438 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
439 return -EFAULT;
440 }
442 return 0;
443 }
/*
 * First phase of an unmap (shared by unmap_grant_ref and
 * unmap_and_replace): validate the handle, remove the device and/or
 * host mapping and drop the matching pin counts.  Page reference/type
 * counts are released later, in __gnttab_unmap_common_complete(), after
 * the batch TLB flush.
 */
static void
__gnttab_unmap_common(
    struct gnttab_unmap_common *op)
{
    domid_t dom;
    struct domain *ld, *rd;
    struct active_grant_entry *act;
    grant_entry_t *sha;
    s16 rc = 0;

    ld = current->domain;

    /* Zero dev_bus_addr means "look the frame up from the active entry". */
    op->frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);

    if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
    {
        gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    op->map = &maptrack_entry(ld->grant_table, op->handle);

    if ( unlikely(!op->map->flags) )
    {
        gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    dom = op->map->domid;
    op->flags = op->map->flags;

    if ( unlikely((op->rd = rd = rcu_lock_domain_by_id(dom)) == NULL) )
    {
        /* This can happen when a grant is implicitly unmapped. */
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
        domain_crash(ld); /* naughty... */
        return;
    }

    rc = xsm_grant_unmapref(ld, rd);
    if ( rc )
    {
        rcu_unlock_domain(rd);
        op->status = GNTST_permission_denied;
        return;
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);

    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->map->ref);
    /* NOTE(review): sha is computed but not used in this function; the
     * GTF_* flag clearing happens in __gnttab_unmap_common_complete(). */
    sha = &shared_entry(rd->grant_table, op->map->ref);

    if ( op->frame == 0 )
    {
        op->frame = act->frame;
    }
    else
    {
        /* Caller supplied a frame: it must match the pinned one. */
        if ( unlikely(op->frame != act->frame) )
            PIN_FAIL(unmap_out, GNTST_general_error,
                     "Bad frame number doesn't match gntref. (%lx != %lx)\n",
                     op->frame, act->frame);
        if ( op->flags & GNTMAP_device_map )
        {
            ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
            op->map->flags &= ~GNTMAP_device_map;
            if ( op->flags & GNTMAP_readonly )
                act->pin -= GNTPIN_devr_inc;
            else
                act->pin -= GNTPIN_devw_inc;
        }
    }

    if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
    {
        if ( (rc = replace_grant_host_mapping(op->host_addr,
                                              op->frame, op->new_addr,
                                              op->flags)) < 0 )
            goto unmap_out;

        ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
        op->map->flags &= ~GNTMAP_host_map;
        if ( op->flags & GNTMAP_readonly )
            act->pin -= GNTPIN_hstr_inc;
        else
            act->pin -= GNTPIN_hstw_inc;
    }

    /* If just unmapped a writable mapping, mark as dirtied */
    if ( !(op->flags & GNTMAP_readonly) )
        gnttab_mark_dirty(rd, op->frame);

 unmap_out:
    op->status = rc;
    spin_unlock(&rd->grant_table->lock);
    rcu_unlock_domain(rd);
}
/*
 * Second phase of an unmap, run after the batch TLB flush: drop the
 * page reference/type counts taken at map time, recycle the maptrack
 * handle once both device and host mappings are gone, and clear the
 * shared entry's GTF_writing/GTF_reading flags when the last relevant
 * pin disappears.
 */
static void
__gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
{
    struct domain *ld, *rd;
    struct active_grant_entry *act;
    grant_entry_t *sha;

    rd = op->rd;

    if ( rd == NULL )
    {
        /*
         * Suggests that __gntab_unmap_common failed in
         * rcu_lock_domain_by_id() or earlier, and so we have nothing
         * to complete
         */
        return;
    }

    ld = current->domain;

    rcu_lock_domain(rd);
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->map->ref);
    sha = &shared_entry(rd->grant_table, op->map->ref);

    if ( unlikely(op->frame != act->frame) )
    {
        /*
         * Suggests that __gntab_unmap_common failed early and so
         * nothing further to do
         */
        goto unmap_out;
    }

    if ( op->flags & GNTMAP_device_map )
    {
        /* Iomem mappings took no page references at map time, so there
         * is nothing to put for them. */
        if ( !is_iomem_page(act->frame) )
        {
            if ( op->flags & GNTMAP_readonly )
                put_page(mfn_to_page(op->frame));
            else
                put_page_and_type(mfn_to_page(op->frame));
        }
    }

    if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
    {
        if ( op->status != 0 )
        {
            /*
             * Suggests that __gntab_unmap_common failed in
             * replace_grant_host_mapping() so nothing further to do
             */
            goto unmap_out;
        }

        if ( !is_iomem_page(op->frame) )
        {
            if ( !(op->flags & GNTMAP_readonly) )
                put_page_type(mfn_to_page(op->frame));
            put_page(mfn_to_page(op->frame));
        }
    }

    /* Both mapping types now gone: the maptrack handle can be reused. */
    if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
    {
        op->map->flags = 0;
        put_maptrack_handle(ld->grant_table, op->handle);
    }

    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
         !(op->flags & GNTMAP_readonly) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( act->pin == 0 )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unmap_out:
    spin_unlock(&rd->grant_table->lock);
    rcu_unlock_domain(rd);
}
/*
 * Translate a GNTTABOP_unmap_grant_ref request into the shared unmap
 * structure and run the first unmap phase.  The replacement-address
 * field is unused by this flavour and is cleared.
 */
static void
__gnttab_unmap_grant_ref(
    struct gnttab_unmap_grant_ref *op,
    struct gnttab_unmap_common *common)
{
    common->host_addr = op->host_addr;
    common->dev_bus_addr = op->dev_bus_addr;
    common->handle = op->handle;

    /* Initialise these in case common contains old state */
    common->new_addr = 0;
    common->rd = NULL;

    __gnttab_unmap_common(common);
    op->status = common->status;
}
/*
 * GNTTABOP_unmap_grant_ref: process @count unmap requests in batches of
 * GNTTAB_UNMAP_BATCH_SIZE.  Each batch does the first unmap phase for
 * every entry, then a single TLB flush, then the completion phase
 * (which drops page references) -- so no stale translation can outlive
 * the reference drop.  Returns 0, or -EFAULT on guest-memory faults
 * (already-unmapped entries are still completed before returning).
 */
static long
gnttab_unmap_grant_ref(
    XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
{
    int i, c, partial_done, done = 0;
    struct gnttab_unmap_grant_ref op;
    struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];

    while ( count != 0 )
    {
        c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
        partial_done = 0;

        for ( i = 0; i < c; i++ )
        {
            if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
                goto fault;
            __gnttab_unmap_grant_ref(&op, &(common[i]));
            ++partial_done;
            if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
                goto fault;
        }

        flush_tlb_mask(current->domain->domain_dirty_cpumask);

        for ( i = 0; i < partial_done; i++ )
            __gnttab_unmap_common_complete(&(common[i]));

        count -= c;
        done += c;
    }

    return 0;

fault:
    /* Flush and complete whatever was unmapped before the fault. */
    flush_tlb_mask(current->domain->domain_dirty_cpumask);

    for ( i = 0; i < partial_done; i++ )
        __gnttab_unmap_common_complete(&(common[i]));
    return -EFAULT;
}
/*
 * Translate a GNTTABOP_unmap_and_replace request into the shared unmap
 * structure and run the first unmap phase.  The dev_bus_addr field is
 * unused by this flavour (no frame cross-check) and is cleared.
 */
static void
__gnttab_unmap_and_replace(
    struct gnttab_unmap_and_replace *op,
    struct gnttab_unmap_common *common)
{
    common->host_addr = op->host_addr;
    common->new_addr = op->new_addr;
    common->handle = op->handle;

    /* Initialise these in case common contains old state */
    common->dev_bus_addr = 0;
    common->rd = NULL;

    __gnttab_unmap_common(common);
    op->status = common->status;
}
/*
 * GNTTABOP_unmap_and_replace: batch driver mirroring
 * gnttab_unmap_grant_ref() -- first unmap phase per entry, one TLB
 * flush per batch, then the completion phase that drops page
 * references.  Returns 0, or -EFAULT on guest-memory faults.
 */
static long
gnttab_unmap_and_replace(
    XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) uop, unsigned int count)
{
    int i, c, partial_done, done = 0;
    struct gnttab_unmap_and_replace op;
    struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];

    while ( count != 0 )
    {
        c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
        partial_done = 0;

        for ( i = 0; i < c; i++ )
        {
            if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
                goto fault;
            __gnttab_unmap_and_replace(&op, &(common[i]));
            ++partial_done;
            if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
                goto fault;
        }

        flush_tlb_mask(current->domain->domain_dirty_cpumask);

        for ( i = 0; i < partial_done; i++ )
            __gnttab_unmap_common_complete(&(common[i]));

        count -= c;
        done += c;
    }

    return 0;

fault:
    /* Flush and complete whatever was unmapped before the fault. */
    flush_tlb_mask(current->domain->domain_dirty_cpumask);

    for ( i = 0; i < partial_done; i++ )
        __gnttab_unmap_common_complete(&(common[i]));
    return -EFAULT;
}
/*
 * Grow domain @d's grant table to @req_nr_frames frames: allocate the
 * extra active and shared frames, then share the new shared frames with
 * the guest.  Returns 1 on success; on allocation failure returns 0
 * after freeing everything allocated by this call.
 */
int
gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
{
    /* d's grant table lock must be held by the caller */

    struct grant_table *gt = d->grant_table;
    unsigned int i;

    ASSERT(req_nr_frames <= max_nr_grant_frames);

    gdprintk(XENLOG_INFO,
            "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
            d->domain_id, nr_grant_frames(gt), req_nr_frames);

    /* Active */
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
            goto active_alloc_failed;
        clear_page(gt->active[i]);
    }

    /* Shared */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        if ( (gt->shared[i] = alloc_xenheap_page()) == NULL )
            goto shared_alloc_failed;
        clear_page(gt->shared[i]);
    }

    /* Share the new shared frames with the recipient domain */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
        gnttab_create_shared_page(d, gt, i);

    gt->nr_grant_frames = req_nr_frames;

    return 1;

 shared_alloc_failed:
    /* NOTE(review): this loop also visits the slot that failed (set to
     * NULL above) and any beyond it; presumably free_xenheap_page()
     * tolerates NULL -- confirm. */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        free_xenheap_page(gt->shared[i]);
        gt->shared[i] = NULL;
    }
 active_alloc_failed:
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        free_xenheap_page(gt->active[i]);
        gt->active[i] = NULL;
    }
    gdprintk(XENLOG_INFO, "Allocation failure when expanding grant table.\n");
    return 0;
}
/*
 * GNTTABOP_setup_table: grow a domain's grant table to op.nr_frames
 * frames (if needed) and return the GMFNs backing the shared grant
 * frames.  @count must be 1.  Only a privileged domain may set up a
 * table on behalf of another domain.  Returns 0 or -EINVAL/-EFAULT;
 * per-operation errors are reported through op.status.
 */
static long
gnttab_setup_table(
    XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
{
    struct gnttab_setup_table op;
    struct domain *d;
    int i;
    unsigned long gmfn;
    domid_t dom;

    if ( count != 1 )
        return -EINVAL;

    if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
    {
        gdprintk(XENLOG_INFO, "Fault while reading gnttab_setup_table_t.\n");
        return -EFAULT;
    }

    if ( unlikely(op.nr_frames > max_nr_grant_frames) )
    {
        gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
                " per domain.\n",
                max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto out;
    }

    dom = op.dom;
    if ( dom == DOMID_SELF )
    {
        dom = current->domain->domain_id;
    }
    else if ( unlikely(!IS_PRIV(current->domain)) )
    {
        op.status = GNTST_permission_denied;
        goto out;
    }

    if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
        op.status = GNTST_bad_domain;
        goto out;
    }

    if ( xsm_grant_setup(current->domain, d) )
    {
        rcu_unlock_domain(d);
        op.status = GNTST_permission_denied;
        goto out;
    }

    spin_lock(&d->grant_table->lock);

    if ( (op.nr_frames > nr_grant_frames(d->grant_table)) &&
         !gnttab_grow_table(d, op.nr_frames) )
    {
        gdprintk(XENLOG_INFO,
                "Expand grant table to %d failed. Current: %d Max: %d.\n",
                op.nr_frames,
                nr_grant_frames(d->grant_table),
                max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto setup_unlock_out;
    }

    op.status = GNTST_okay;
    for ( i = 0; i < op.nr_frames; i++ )
    {
        gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
        /* Copy failures here are ignored (note the (void) cast). */
        (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
    }

 setup_unlock_out:
    spin_unlock(&d->grant_table->lock);

    rcu_unlock_domain(d);

 out:
    if ( unlikely(copy_to_guest(uop, &op, 1)) )
        return -EFAULT;

    return 0;
}
892 static long
893 gnttab_query_size(
894 XEN_GUEST_HANDLE(gnttab_query_size_t) uop, unsigned int count)
895 {
896 struct gnttab_query_size op;
897 struct domain *d;
898 domid_t dom;
899 int rc;
901 if ( count != 1 )
902 return -EINVAL;
904 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
905 {
906 gdprintk(XENLOG_INFO, "Fault while reading gnttab_query_size_t.\n");
907 return -EFAULT;
908 }
910 dom = op.dom;
911 if ( dom == DOMID_SELF )
912 {
913 dom = current->domain->domain_id;
914 }
915 else if ( unlikely(!IS_PRIV(current->domain)) )
916 {
917 op.status = GNTST_permission_denied;
918 goto query_out;
919 }
921 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
922 {
923 gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
924 op.status = GNTST_bad_domain;
925 goto query_out;
926 }
928 rc = xsm_grant_query_size(current->domain, d);
929 if ( rc )
930 {
931 rcu_unlock_domain(d);
932 op.status = GNTST_permission_denied;
933 goto query_out;
934 }
936 spin_lock(&d->grant_table->lock);
938 op.nr_frames = nr_grant_frames(d->grant_table);
939 op.max_nr_frames = max_nr_grant_frames;
940 op.status = GNTST_okay;
942 spin_unlock(&d->grant_table->lock);
944 rcu_unlock_domain(d);
946 query_out:
947 if ( unlikely(copy_to_guest(uop, &op, 1)) )
948 return -EFAULT;
950 return 0;
951 }
953 /*
954 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
955 * ownership of a page frame. If so, lock down the grant entry.
956 */
957 static int
958 gnttab_prepare_for_transfer(
959 struct domain *rd, struct domain *ld, grant_ref_t ref)
960 {
961 struct grant_table *rgt;
962 struct grant_entry *sha;
963 union grant_combo scombo, prev_scombo, new_scombo;
964 int retries = 0;
966 if ( unlikely((rgt = rd->grant_table) == NULL) )
967 {
968 gdprintk(XENLOG_INFO, "Dom %d has no grant table.\n", rd->domain_id);
969 return 0;
970 }
972 spin_lock(&rgt->lock);
974 if ( unlikely(ref >= nr_grant_entries(rd->grant_table)) )
975 {
976 gdprintk(XENLOG_INFO,
977 "Bad grant reference (%d) for transfer to domain(%d).\n",
978 ref, rd->domain_id);
979 goto fail;
980 }
982 sha = &shared_entry(rgt, ref);
984 scombo.word = *(u32 *)&sha->flags;
986 for ( ; ; )
987 {
988 if ( unlikely(scombo.shorts.flags != GTF_accept_transfer) ||
989 unlikely(scombo.shorts.domid != ld->domain_id) )
990 {
991 gdprintk(XENLOG_INFO, "Bad flags (%x) or dom (%d). "
992 "(NB. expected dom %d)\n",
993 scombo.shorts.flags, scombo.shorts.domid,
994 ld->domain_id);
995 goto fail;
996 }
998 new_scombo = scombo;
999 new_scombo.shorts.flags |= GTF_transfer_committed;
1001 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
1002 scombo.word, new_scombo.word);
1003 if ( likely(prev_scombo.word == scombo.word) )
1004 break;
1006 if ( retries++ == 4 )
1008 gdprintk(XENLOG_WARNING, "Shared grant entry is unstable.\n");
1009 goto fail;
1012 scombo = prev_scombo;
1015 spin_unlock(&rgt->lock);
1016 return 1;
1018 fail:
1019 spin_unlock(&rgt->lock);
1020 return 0;
1023 static long
1024 gnttab_transfer(
1025 XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
1027 struct domain *d = current->domain;
1028 struct domain *e;
1029 struct page_info *page;
1030 int i;
1031 grant_entry_t *sha;
1032 struct gnttab_transfer gop;
1033 unsigned long mfn;
1035 for ( i = 0; i < count; i++ )
1037 /* Read from caller address space. */
1038 if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
1040 gdprintk(XENLOG_INFO, "gnttab_transfer: error reading req %d/%d\n",
1041 i, count);
1042 return -EFAULT;
1045 mfn = gmfn_to_mfn(d, gop.mfn);
1047 /* Check the passed page frame for basic validity. */
1048 if ( unlikely(!mfn_valid(mfn)) )
1050 gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
1051 (unsigned long)gop.mfn);
1052 gop.status = GNTST_bad_page;
1053 goto copyback;
1056 page = mfn_to_page(mfn);
1057 if ( unlikely(is_xen_heap_page(page)) )
1059 gdprintk(XENLOG_INFO, "gnttab_transfer: xen frame %lx\n",
1060 (unsigned long)gop.mfn);
1061 gop.status = GNTST_bad_page;
1062 goto copyback;
1065 if ( steal_page(d, page, 0) < 0 )
1067 gop.status = GNTST_bad_page;
1068 goto copyback;
1071 /* Find the target domain. */
1072 if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
1074 gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
1075 gop.domid);
1076 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1077 free_domheap_page(page);
1078 gop.status = GNTST_bad_domain;
1079 goto copyback;
1082 if ( xsm_grant_transfer(d, e) )
1084 rcu_unlock_domain(e);
1085 gop.status = GNTST_permission_denied;
1086 goto copyback;
1089 spin_lock(&e->page_alloc_lock);
1091 /*
1092 * Check that 'e' will accept the page and has reservation
1093 * headroom. Also, a domain mustn't have PGC_allocated
1094 * pages when it is dying.
1095 */
1096 if ( unlikely(e->is_dying) ||
1097 unlikely(e->tot_pages >= e->max_pages) ||
1098 unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
1100 if ( !e->is_dying )
1101 gdprintk(XENLOG_INFO, "gnttab_transfer: "
1102 "Transferee has no reservation "
1103 "headroom (%d,%d) or provided a bad grant ref (%08x) "
1104 "or is dying (%d)\n",
1105 e->tot_pages, e->max_pages, gop.ref, e->is_dying);
1106 spin_unlock(&e->page_alloc_lock);
1107 rcu_unlock_domain(e);
1108 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1109 free_domheap_page(page);
1110 gop.status = GNTST_general_error;
1111 goto copyback;
1114 /* Okay, add the page to 'e'. */
1115 if ( unlikely(e->tot_pages++ == 0) )
1116 get_knownalive_domain(e);
1117 list_add_tail(&page->list, &e->page_list);
1118 page_set_owner(page, e);
1120 spin_unlock(&e->page_alloc_lock);
1122 TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
1124 /* Tell the guest about its new page frame. */
1125 spin_lock(&e->grant_table->lock);
1127 sha = &shared_entry(e->grant_table, gop.ref);
1128 guest_physmap_add_page(e, sha->frame, mfn);
1129 sha->frame = mfn;
1130 wmb();
1131 sha->flags |= GTF_transfer_completed;
1133 spin_unlock(&e->grant_table->lock);
1135 rcu_unlock_domain(e);
1137 gop.status = GNTST_okay;
1139 copyback:
1140 if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
1142 gdprintk(XENLOG_INFO, "gnttab_transfer: error writing resp "
1143 "%d/%d\n", i, count);
1144 return -EFAULT;
1148 return 0;
1151 /* Undo __acquire_grant_for_copy. Again, this has no effect on page
1152 type and reference counts. */
1153 static void
1154 __release_grant_for_copy(
1155 struct domain *rd, unsigned long gref, int readonly)
1157 grant_entry_t *sha;
1158 struct active_grant_entry *act;
1159 unsigned long r_frame;
1161 spin_lock(&rd->grant_table->lock);
1163 act = &active_entry(rd->grant_table, gref);
1164 sha = &shared_entry(rd->grant_table, gref);
1165 r_frame = act->frame;
1167 if ( readonly )
1169 act->pin -= GNTPIN_hstr_inc;
1171 else
1173 gnttab_mark_dirty(rd, r_frame);
1175 act->pin -= GNTPIN_hstw_inc;
1176 if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) )
1177 gnttab_clear_flag(_GTF_writing, &sha->flags);
1180 if ( !act->pin )
1181 gnttab_clear_flag(_GTF_reading, &sha->flags);
1183 spin_unlock(&rd->grant_table->lock);
1186 /* Grab a frame number from a grant entry and update the flags and pin
1187 count as appropriate. Note that this does *not* update the page
1188 type or reference counts, and does not check that the mfn is
1189 actually valid. */
1190 static int
1191 __acquire_grant_for_copy(
1192 struct domain *rd, unsigned long gref, int readonly,
1193 unsigned long *frame)
/* rd: granting domain.  gref: grant reference to pin.  readonly: non-zero
 * to take a read-only pin, zero for a writable pin.  On success *frame is
 * set to the machine frame named by the grant.  Returns GNTST_okay or a
 * negative GNTST_* code (via PIN_FAIL, which also sets rc). */
1195 grant_entry_t *sha;
1196 struct active_grant_entry *act;
1197 s16 rc = GNTST_okay;
1198 int retries = 0;
1199 union grant_combo scombo, prev_scombo, new_scombo;
1201 spin_lock(&rd->grant_table->lock);
1203 if ( unlikely(gref >= nr_grant_entries(rd->grant_table)) )
1204 PIN_FAIL(unlock_out, GNTST_bad_gntref,
1205 "Bad grant reference %ld\n", gref);
1207 act = &active_entry(rd->grant_table, gref);
1208 sha = &shared_entry(rd->grant_table, gref);
1210 /* If already pinned, check the active domid and avoid refcnt overflow. */
/* 0x80808080 appears to test the top bit of each 8-bit pin sub-count
 * so no increment below can wrap one -- confirm against GNTPIN_* layout. */
1211 if ( act->pin &&
1212 ((act->domid != current->domain->domain_id) ||
1213 (act->pin & 0x80808080U) != 0) )
1214 PIN_FAIL(unlock_out, GNTST_general_error,
1215 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
1216 act->domid, current->domain->domain_id, act->pin);
/* The shared flags only need updating when this pin changes the entry's
 * externally visible state: first pin of any kind, or first writable pin. */
1218 if ( !act->pin ||
1219 (!readonly && !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask))) )
/* Snapshot flags+domid as a single 32-bit word so they can be
 * re-checked and updated atomically: the granting guest may be
 * modifying its shared entry concurrently. */
1221 scombo.word = *(u32 *)&sha->flags;
1223 for ( ; ; )
1225 /* If not already pinned, check the grant domid and type. */
1226 if ( !act->pin &&
1227 (((scombo.shorts.flags & GTF_type_mask) !=
1228 GTF_permit_access) ||
1229 (scombo.shorts.domid != current->domain->domain_id)) )
1230 PIN_FAIL(unlock_out, GNTST_general_error,
1231 "Bad flags (%x) or dom (%d). (expected dom %d)\n",
1232 scombo.shorts.flags, scombo.shorts.domid,
1233 current->domain->domain_id);
1235 new_scombo = scombo;
1236 new_scombo.shorts.flags |= GTF_reading;
1238 if ( !readonly )
1240 new_scombo.shorts.flags |= GTF_writing;
/* Refuse a writable pin on a grant marked read-only. */
1241 if ( unlikely(scombo.shorts.flags & GTF_readonly) )
1242 PIN_FAIL(unlock_out, GNTST_general_error,
1243 "Attempt to write-pin a r/o grant entry.\n");
/* Publish the new flags only if the entry still matches the
 * snapshot; otherwise loop with the value actually observed. */
1246 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
1247 scombo.word, new_scombo.word);
1248 if ( likely(prev_scombo.word == scombo.word) )
1249 break;
/* Bounded retry: a guest flipping its entry cannot livelock us. */
1251 if ( retries++ == 4 )
1252 PIN_FAIL(unlock_out, GNTST_general_error,
1253 "Shared grant entry is unstable.\n");
1255 scombo = prev_scombo;
/* First pin of this entry: latch the domid from the validated snapshot
 * and translate the granted guest frame to a machine frame. */
1258 if ( !act->pin )
1260 act->domid = scombo.shorts.domid;
1261 act->frame = gmfn_to_mfn(rd, sha->frame);
/* Account the new pin (host read or host write) and report the frame. */
1265 act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;
1267 *frame = act->frame;
1269 unlock_out:
1270 spin_unlock(&rd->grant_table->lock);
1271 return rc;
/* Perform one grant-table copy: transfer op->len bytes from the source
 * page to the destination page.  Each side is named either by grant
 * reference (GNTCOPY_source_gref / GNTCOPY_dest_gref in op->flags) or,
 * for DOMID_SELF only, by raw guest frame number.  The result is
 * reported in op->status as a GNTST_* code; nothing is returned. */
1274 static void
1275 __gnttab_copy(
1276 struct gnttab_copy *op)
1278 struct domain *sd = NULL, *dd = NULL;
1279 unsigned long s_frame, d_frame;
1280 char *sp, *dp;
1281 s16 rc = GNTST_okay;
/* Track exactly which references/pins have been taken so far, so the
 * error_out path releases precisely what was acquired. */
1282 int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
1283 int src_is_gref, dest_is_gref;
/* The copy must stay within a single page on each side. */
1285 if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
1286 ((op->dest.offset + op->len) > PAGE_SIZE) )
1287 PIN_FAIL(error_out, GNTST_bad_copy_arg, "copy beyond page area.\n");
1289 src_is_gref = op->flags & GNTCOPY_source_gref;
1290 dest_is_gref = op->flags & GNTCOPY_dest_gref;
/* Raw frame numbers may only name the caller's own pages; foreign pages
 * must be addressed through a grant reference. */
1292 if ( (op->source.domid != DOMID_SELF && !src_is_gref ) ||
1293 (op->dest.domid != DOMID_SELF && !dest_is_gref) )
1294 PIN_FAIL(error_out, GNTST_permission_denied,
1295 "only allow copy-by-mfn for DOMID_SELF.\n");
/* Take RCU references on both domains for the duration of the copy. */
1297 if ( op->source.domid == DOMID_SELF )
1298 sd = rcu_lock_current_domain();
1299 else if ( (sd = rcu_lock_domain_by_id(op->source.domid)) == NULL )
1300 PIN_FAIL(error_out, GNTST_bad_domain,
1301 "couldn't find %d\n", op->source.domid);
1303 if ( op->dest.domid == DOMID_SELF )
1304 dd = rcu_lock_current_domain();
1305 else if ( (dd = rcu_lock_domain_by_id(op->dest.domid)) == NULL )
1306 PIN_FAIL(error_out, GNTST_bad_domain,
1307 "couldn't find %d\n", op->dest.domid);
/* Security-module hook; any denial is folded into permission_denied. */
1309 rc = xsm_grant_copy(sd, dd);
1310 if ( rc )
1312 rc = GNTST_permission_denied;
1313 goto error_out;
/* Resolve the source machine frame: via a read-only grant pin, or by
 * direct translation of the caller's own gmfn. */
1316 if ( src_is_gref )
1318 rc = __acquire_grant_for_copy(sd, op->source.u.ref, 1, &s_frame);
1319 if ( rc != GNTST_okay )
1320 goto error_out;
1321 have_s_grant = 1;
1323 else
1325 s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
/* Validate the frame and hold a general page reference across the copy. */
1327 if ( unlikely(!mfn_valid(s_frame)) )
1328 PIN_FAIL(error_out, GNTST_general_error,
1329 "source frame %lx invalid.\n", s_frame);
1330 if ( !get_page(mfn_to_page(s_frame), sd) )
/* Failure is expected while the owner is being torn down; only warn
 * otherwise. */
1332 if ( !sd->is_dying )
1333 gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
1334 rc = GNTST_general_error;
1335 goto error_out;
1337 have_s_ref = 1;
/* Resolve the destination likewise, but with a writable grant pin. */
1339 if ( dest_is_gref )
1341 rc = __acquire_grant_for_copy(dd, op->dest.u.ref, 0, &d_frame);
1342 if ( rc != GNTST_okay )
1343 goto error_out;
1344 have_d_grant = 1;
1346 else
1348 d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
/* Validate and take a writable type reference: the page will be written. */
1350 if ( unlikely(!mfn_valid(d_frame)) )
1351 PIN_FAIL(error_out, GNTST_general_error,
1352 "destination frame %lx invalid.\n", d_frame);
1353 if ( !get_page_and_type(mfn_to_page(d_frame), dd, PGT_writable_page) )
1355 if ( !dd->is_dying )
1356 gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
1357 rc = GNTST_general_error;
1358 goto error_out;
/* Both frames secured: map them into hypervisor space, copy, unmap. */
1361 sp = map_domain_page(s_frame);
1362 dp = map_domain_page(d_frame);
1364 memcpy(dp + op->dest.offset, sp + op->source.offset, op->len);
1366 unmap_domain_page(dp);
1367 unmap_domain_page(sp);
/* The destination frame was modified; record that for its owner. */
1369 gnttab_mark_dirty(dd, d_frame);
/* Success path drops the writable reference here, before the shared
 * error/exit path below releases the remaining resources. */
1371 put_page_and_type(mfn_to_page(d_frame));
1372 error_out:
1373 if ( have_s_ref )
1374 put_page(mfn_to_page(s_frame));
1375 if ( have_s_grant )
1376 __release_grant_for_copy(sd, op->source.u.ref, 1);
1377 if ( have_d_grant )
1378 __release_grant_for_copy(dd, op->dest.u.ref, 0);
1379 if ( sd )
1380 rcu_unlock_domain(sd);
1381 if ( dd )
1382 rcu_unlock_domain(dd);
/* Report the outcome of this single operation to the caller/guest. */
1383 op->status = rc;
1386 static long
1387 gnttab_copy(
1388 XEN_GUEST_HANDLE(gnttab_copy_t) uop, unsigned int count)
1390 int i;
1391 struct gnttab_copy op;
1393 for ( i = 0; i < count; i++ )
1395 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
1396 return -EFAULT;
1397 __gnttab_copy(&op);
1398 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
1399 return -EFAULT;
1401 return 0;
/* Top-level GNTTABOP hypercall entry point.  cmd selects the
 * sub-operation; uop points at an array of count guest request
 * structures of the corresponding type.  Returns 0/positive on success
 * or a negative errno value. */
1404 long
1405 do_grant_table_op(
1406 unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
1408 long rc;
1409 struct domain *d = current->domain;
/* Bound the batch size so one hypercall cannot run unboundedly long. */
1411 if ( count > 512 )
1412 return -EINVAL;
/* All grant-table sub-operations run under the per-domain big lock. */
1414 LOCK_BIGLOCK(d);
/* Default result if a guest_handle_okay() check bails out below. */
1416 rc = -EFAULT;
1417 switch ( cmd )
1419 case GNTTABOP_map_grant_ref:
1421 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
1422 guest_handle_cast(uop, gnttab_map_grant_ref_t);
/* Verify the whole guest array is accessible before processing. */
1423 if ( unlikely(!guest_handle_okay(map, count)) )
1424 goto out;
1425 rc = gnttab_map_grant_ref(map, count);
1426 break;
1428 case GNTTABOP_unmap_grant_ref:
1430 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
1431 guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
1432 if ( unlikely(!guest_handle_okay(unmap, count)) )
1433 goto out;
1434 rc = gnttab_unmap_grant_ref(unmap, count);
1435 break;
1437 case GNTTABOP_unmap_and_replace:
1439 XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) unmap =
1440 guest_handle_cast(uop, gnttab_unmap_and_replace_t);
1441 if ( unlikely(!guest_handle_okay(unmap, count)) )
1442 goto out;
/* This sub-op is optional per architecture: report -ENOSYS when the
 * platform cannot replace existing grant mappings. */
1443 rc = -ENOSYS;
1444 if ( unlikely(!replace_grant_supported()) )
1445 goto out;
1446 rc = gnttab_unmap_and_replace(unmap, count);
1447 break;
1449 case GNTTABOP_setup_table:
1451 rc = gnttab_setup_table(
1452 guest_handle_cast(uop, gnttab_setup_table_t), count);
1453 break;
1455 case GNTTABOP_transfer:
1457 XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
1458 guest_handle_cast(uop, gnttab_transfer_t);
1459 if ( unlikely(!guest_handle_okay(transfer, count)) )
1460 goto out;
1461 rc = gnttab_transfer(transfer, count);
1462 break;
1464 case GNTTABOP_copy:
1466 XEN_GUEST_HANDLE(gnttab_copy_t) copy =
1467 guest_handle_cast(uop, gnttab_copy_t);
1468 if ( unlikely(!guest_handle_okay(copy, count)) )
1469 goto out;
1470 rc = gnttab_copy(copy, count);
1471 break;
1473 case GNTTABOP_query_size:
1475 rc = gnttab_query_size(
1476 guest_handle_cast(uop, gnttab_query_size_t), count);
1477 break;
/* Unknown sub-operation. */
1479 default:
1480 rc = -ENOSYS;
1481 break;
1484 out:
1485 UNLOCK_BIGLOCK(d);
1487 return rc;
1490 #ifdef CONFIG_COMPAT
1491 #include "compat/grant_table.c"
1492 #endif
1494 static unsigned int max_nr_active_grant_frames(void)
1496 return (((max_nr_grant_frames * (PAGE_SIZE / sizeof(grant_entry_t))) +
1497 ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
1498 / (PAGE_SIZE / sizeof(struct active_grant_entry)));
/* Allocate and initialise d's grant table: the active-entry frames, the
 * maptrack table (which records this domain's mappings of foreign
 * frames) and the shared frames exposed to the guest.  Returns 0 on
 * success or -ENOMEM; on failure every partial allocation is unwound
 * through the no_mem_* label chain in reverse order. */
1501 int
1502 grant_table_create(
1503 struct domain *d)
1505 struct grant_table *t;
1506 int i;
1508 /* If this sizeof assertion fails, fix the function: shared_index */
1509 ASSERT(sizeof(grant_entry_t) == 8);
1511 if ( (t = xmalloc(struct grant_table)) == NULL )
1512 goto no_mem_0;
1514 /* Simple stuff. */
1515 memset(t, 0, sizeof(*t));
1516 spin_lock_init(&t->lock);
1517 t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
1519 /* Active grant table. */
/* The pointer array is sized for the maximum table size; only the
 * frames backing the initial size are populated now. */
1520 if ( (t->active = xmalloc_array(struct active_grant_entry *,
1521 max_nr_active_grant_frames())) == NULL )
1522 goto no_mem_1;
1523 memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
1524 for ( i = 0;
1525 i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
1527 if ( (t->active[i] = alloc_xenheap_page()) == NULL )
1528 goto no_mem_2;
1529 clear_page(t->active[i]);
1532 /* Tracking of mapped foreign frames table */
1533 if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
1534 max_nr_maptrack_frames())) == NULL )
1535 goto no_mem_2;
1536 memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
1537 if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
1538 goto no_mem_3;
1539 clear_page(t->maptrack[0]);
1540 t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
/* Chain the maptrack entries into a free list: entry i links to i+1. */
1541 for ( i = 0; i < t->maptrack_limit; i++ )
1542 t->maptrack[0][i].ref = i+1;
1544 /* Shared grant table. */
1545 if ( (t->shared = xmalloc_array(struct grant_entry *,
1546 max_nr_grant_frames)) == NULL )
1547 goto no_mem_3;
1548 memset(t->shared, 0, max_nr_grant_frames * sizeof(t->shared[0]));
1549 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1551 if ( (t->shared[i] = alloc_xenheap_page()) == NULL )
1552 goto no_mem_4;
1553 clear_page(t->shared[i]);
/* Make the initial shared frames visible/mappable by the guest. */
1556 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1557 gnttab_create_shared_page(d, t, i);
1559 /* Okay, install the structure. */
1560 d->grant_table = t;
1561 return 0;
/* Unwind in reverse order of allocation.  Unallocated slots were zeroed
 * by the memsets above, so the free loops may pass NULL pointers --
 * NOTE(review): relies on free_xenheap_page(NULL) being a no-op. */
1563 no_mem_4:
1564 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1565 free_xenheap_page(t->shared[i]);
1566 xfree(t->shared);
1567 no_mem_3:
1568 free_xenheap_page(t->maptrack[0]);
1569 xfree(t->maptrack);
1570 no_mem_2:
1571 for ( i = 0;
1572 i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
1573 free_xenheap_page(t->active[i]);
1574 xfree(t->active);
1575 no_mem_1:
1576 xfree(t);
1577 no_mem_0:
1578 return -ENOMEM;
/* Domain-death teardown: walk every maptrack entry of the dying domain
 * d and undo each grant mapping it still holds of other domains' pages,
 * releasing the pins, page references and shared-entry flags that were
 * taken when the mapping was established. */
1581 void
1582 gnttab_release_mappings(
1583 struct domain *d)
1585 struct grant_table *gt = d->grant_table;
1586 struct grant_mapping *map;
1587 grant_ref_t ref;
1588 grant_handle_t handle;
1589 struct domain *rd;
1590 struct active_grant_entry *act;
1591 struct grant_entry *sha;
/* Must only run once the domain has entered its dying state. */
1593 BUG_ON(!d->is_dying);
1595 for ( handle = 0; handle < gt->maptrack_limit; handle++ )
1597 map = &maptrack_entry(gt, handle);
/* Skip entries that do not record a live device or host mapping. */
1598 if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
1599 continue;
1601 ref = map->ref;
1603 gdprintk(XENLOG_INFO, "Grant release (%hu) ref:(%hu) "
1604 "flags:(%x) dom:(%hu)\n",
1605 handle, ref, map->flags, map->domid);
/* The granting domain may itself already be gone. */
1607 rd = rcu_lock_domain_by_id(map->domid);
1608 if ( rd == NULL )
1610 /* Nothing to clear up... */
1611 map->flags = 0;
1612 continue;
1615 spin_lock(&rd->grant_table->lock);
1617 act = &active_entry(rd->grant_table, ref);
1618 sha = &shared_entry(rd->grant_table, ref);
/* Drop whichever pins this mapping held.  Each pin sub-count must be
 * non-zero if the corresponding map flag is set (BUG_ON otherwise).
 * Page reference/type counts were never taken for iomem pages, so the
 * put_page* calls are skipped for those. */
1620 if ( map->flags & GNTMAP_readonly )
1622 if ( map->flags & GNTMAP_device_map )
1624 BUG_ON(!(act->pin & GNTPIN_devr_mask));
1625 act->pin -= GNTPIN_devr_inc;
1626 if ( !is_iomem_page(act->frame) )
1627 put_page(mfn_to_page(act->frame));
1630 if ( map->flags & GNTMAP_host_map )
1632 BUG_ON(!(act->pin & GNTPIN_hstr_mask));
1633 act->pin -= GNTPIN_hstr_inc;
1634 if ( !is_iomem_page(act->frame) )
1635 gnttab_release_put_page(mfn_to_page(act->frame));
/* Writable mappings additionally held a writable type reference. */
1638 else
1640 if ( map->flags & GNTMAP_device_map )
1642 BUG_ON(!(act->pin & GNTPIN_devw_mask));
1643 act->pin -= GNTPIN_devw_inc;
1644 if ( !is_iomem_page(act->frame) )
1645 put_page_and_type(mfn_to_page(act->frame));
1648 if ( map->flags & GNTMAP_host_map )
1650 BUG_ON(!(act->pin & GNTPIN_hstw_mask));
1651 act->pin -= GNTPIN_hstw_inc;
1652 if ( !is_iomem_page(act->frame) )
1653 gnttab_release_put_page_and_type(mfn_to_page(act->frame));
/* Update the shared entry's status flags once the pins are gone. */
1656 if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
1657 gnttab_clear_flag(_GTF_writing, &sha->flags);
1660 if ( act->pin == 0 )
1661 gnttab_clear_flag(_GTF_reading, &sha->flags);
1663 spin_unlock(&rd->grant_table->lock);
1665 rcu_unlock_domain(rd);
/* Mark the maptrack entry dead. */
1667 map->flags = 0;
1672 void
1673 grant_table_destroy(
1674 struct domain *d)
1676 struct grant_table *t = d->grant_table;
1677 int i;
1679 if ( t == NULL )
1680 return;
1682 for ( i = 0; i < nr_grant_frames(t); i++ )
1683 free_xenheap_page(t->shared[i]);
1684 xfree(t->shared);
1686 for ( i = 0; i < nr_maptrack_frames(t); i++ )
1687 free_xenheap_page(t->maptrack[i]);
1688 xfree(t->maptrack);
1690 for ( i = 0; i < nr_active_grant_frames(t); i++ )
1691 free_xenheap_page(t->active[i]);
1692 xfree(t->active);
1694 xfree(t);
1695 d->grant_table = NULL;
1698 /*
1699 * Local variables:
1700 * mode: C
1701 * c-set-style: "BSD"
1702 * c-basic-offset: 4
1703 * tab-width: 4
1704 * indent-tabs-mode: nil
1705 * End:
1706 */