ia64/xen-unstable

view xen/common/grant_table.c @ 17062:0769835cf50f

x86 shadow: Reduce scope of shadow lock.

emulate_map_dest doesn't require holding the shadow lock, since the
only shadow-related operation possibly involved is removing a shadow,
which is less frequent and can acquire the lock internally. The rest
are either guest table walks or per-vcpu monitor table manipulation.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 14 10:33:12 2008 +0000 (2008-02-14)
parents cff4c8a1aa28
children cb3e47897b85
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005-2006 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/config.h>
28 #include <xen/iocap.h>
29 #include <xen/lib.h>
30 #include <xen/sched.h>
31 #include <xen/mm.h>
32 #include <xen/trace.h>
33 #include <xen/guest_access.h>
34 #include <xen/domain_page.h>
35 #include <xsm/xsm.h>
/*
 * Upper bound on the number of shared grant-table frames per domain.
 * Tunable on the Xen command line via "gnttab_max_nr_frames" unless the
 * architecture already fixes max_nr_grant_frames as a macro.
 */
37 #ifndef max_nr_grant_frames
38 unsigned int max_nr_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
39 integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
40 #endif
42 /* The maximum number of grant mappings is defined as a multiplier of the
43 * maximum number of grant table entries. This defines the multiplier used.
44 * Pretty arbitrary. [POLICY]
45 */
46 #define MAX_MAPTRACK_TO_GRANTS_RATIO 8
48 /*
49 * The first two members of a grant entry are updated as a combined pair.
50 * The following union allows that to happen in an endian-neutral fashion.
51 */
/* 'word' is the unit used for the cmpxchg()-based updates below. */
52 union grant_combo {
53 uint32_t word;
54 struct {
55 uint16_t flags;
56 domid_t domid;
57 } shorts;
58 };
60 /* Used to share code between unmap_grant_ref and unmap_and_replace. */
61 struct gnttab_unmap_common {
62 /* Input */
63 uint64_t host_addr;
64 uint64_t dev_bus_addr;
65 uint64_t new_addr;
66 grant_handle_t handle;
68 /* Return */
69 int16_t status;
71 /* Shared state between *_unmap and *_unmap_complete */
72 u16 flags;
73 unsigned long frame;
74 struct grant_mapping *map;
/* Granting (remote) domain; set by __gnttab_unmap_common, NULL on early failure. */
75 struct domain *rd;
76 };
78 /* Number of unmap operations that are done between each tlb flush */
79 #define GNTTAB_UNMAP_BATCH_SIZE 32
/*
 * Log a warning, set the local variable 'rc' to _rc, and jump to label
 * _lbl.  Both 'rc' and the label must exist in the enclosing function.
 */
82 #define PIN_FAIL(_lbl, _rc, _f, _a...) \
83 do { \
84 gdprintk(XENLOG_WARNING, _f, ## _a ); \
85 rc = (_rc); \
86 goto _lbl; \
87 } while ( 0 )
/* Maptrack entries per xenheap page, and 2-D lookup of entry 'e' in table 't'. */
89 #define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
90 #define maptrack_entry(t, e) \
91 ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
/* Number of maptrack frames currently allocated to table 't'. */
93 static inline unsigned int
94 nr_maptrack_frames(struct grant_table *t)
95 {
96 return t->maptrack_limit / MAPTRACK_PER_PAGE;
97 }
99 static unsigned inline int max_nr_maptrack_frames(void)
100 {
101 return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
102 }
/* Shared grant entries per page, and 2-D lookup of shared entry 'e' in 't'. */
105 #define SHGNT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_t))
106 #define shared_entry(t, e) \
107 ((t)->shared[(e)/SHGNT_PER_PAGE][(e)%SHGNT_PER_PAGE])
/* Active grant entries per page, and 2-D lookup of active entry 'e' in 't'. */
108 #define ACGNT_PER_PAGE (PAGE_SIZE / sizeof(struct active_grant_entry))
109 #define active_entry(t, e) \
110 ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
/*
 * Pop one entry off the maptrack free list and return its index, or -1
 * if the list is exhausted.  Free entries are chained through their
 * 'ref' fields; the tail sentinel is maptrack_limit - 1.
 * NOTE(review): also called lockless from the fast path of
 * get_maptrack_handle() -- confirm that is intentional before editing.
 */
112 static inline int
113 __get_maptrack_handle(
114 struct grant_table *t)
115 {
116 unsigned int h;
117 if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
118 return -1;
119 t->maptrack_head = maptrack_entry(t, h).ref;
120 t->map_count++;
121 return h;
122 }
/* Return a maptrack entry to the head of the free list. */
124 static inline void
125 put_maptrack_handle(
126 struct grant_table *t, int handle)
127 {
128 maptrack_entry(t, handle).ref = t->maptrack_head;
129 t->maptrack_head = handle;
130 t->map_count--;
131 }
/*
 * Allocate a maptrack handle from the local grant table 'lgt', growing
 * the maptrack array by one page (under lgt->lock) if the free list is
 * empty.  Returns the handle, or -1 if the table is at its maximum size
 * or the page allocation fails.
 */
133 static inline int
134 get_maptrack_handle(
135 struct grant_table *lgt)
136 {
137 int i;
138 grant_handle_t handle;
139 struct grant_mapping *new_mt;
140 unsigned int new_mt_limit, nr_frames;
/* Lockless fast path; retry under the lock before deciding to grow. */
142 if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
143 {
144 spin_lock(&lgt->lock);
146 if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
147 {
148 nr_frames = nr_maptrack_frames(lgt);
149 if ( nr_frames >= max_nr_maptrack_frames() )
150 {
151 spin_unlock(&lgt->lock);
152 return -1;
153 }
155 new_mt = alloc_xenheap_page();
156 if ( new_mt == NULL )
157 {
158 spin_unlock(&lgt->lock);
159 return -1;
160 }
162 clear_page(new_mt);
164 new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;
/* Chain the new entries onto the free list via their 'ref' fields. */
166 for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
167 {
168 new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
169 new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
170 }
172 lgt->maptrack[nr_frames] = new_mt;
173 lgt->maptrack_limit = new_mt_limit;
175 gdprintk(XENLOG_INFO,
176 "Increased maptrack size to %u frames.\n", nr_frames + 1);
/* Must succeed now: we just added MAPTRACK_PER_PAGE free entries. */
177 handle = __get_maptrack_handle(lgt);
178 }
180 spin_unlock(&lgt->lock);
181 }
182 return handle;
183 }
185 /*
186 * Returns 0 if TLB flush / invalidate required by caller.
187 * va will indicate the address to be invalidated.
188 *
189 * addr is _either_ a host virtual address, or the address of the pte to
190 * update, as indicated by the GNTMAP_contains_pte flag.
191 */
/*
 * Map a single grant reference for the calling (local) domain.  On
 * success fills in op->handle / op->dev_bus_addr and sets op->status to
 * GNTST_okay; on failure op->status carries a GNTST_* error and any
 * partially-taken pins/references are rolled back via undo_out.
 */
192 static void
193 __gnttab_map_grant_ref(
194 struct gnttab_map_grant_ref *op)
195 {
196 struct domain *ld, *rd;
197 struct vcpu *led;
198 int handle;
199 unsigned long frame = 0;
200 int rc = GNTST_okay;
201 unsigned int cache_flags;
202 struct active_grant_entry *act;
203 struct grant_mapping *mt;
204 grant_entry_t *sha;
205 union grant_combo scombo, prev_scombo, new_scombo;
207 /*
208 * We bound the number of times we retry CMPXCHG on memory locations that
209 * we share with a guest OS. The reason is that the guest can modify that
210 * location at a higher rate than we can read-modify-CMPXCHG, so the guest
211 * could cause us to livelock. There are a few cases where it is valid for
212 * the guest to race our updates (e.g., to change the GTF_readonly flag),
213 * so we allow a few retries before failing.
214 */
215 int retries = 0;
217 led = current;
218 ld = led->domain;
/* At least one of device_map / host_map must be requested. */
220 if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
221 {
222 gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
223 op->status = GNTST_bad_gntref;
224 return;
225 }
227 if ( unlikely((rd = rcu_lock_domain_by_id(op->dom)) == NULL) )
228 {
229 gdprintk(XENLOG_INFO, "Could not find domain %d\n", op->dom);
230 op->status = GNTST_bad_domain;
231 return;
232 }
234 rc = xsm_grant_mapref(ld, rd, op->flags);
235 if ( rc )
236 {
237 rcu_unlock_domain(rd);
238 op->status = GNTST_permission_denied;
239 return;
240 }
/* Reserve a local maptrack slot before touching the remote table. */
242 if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
243 {
244 rcu_unlock_domain(rd);
245 gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
246 op->status = GNTST_no_device_space;
247 return;
248 }
250 spin_lock(&rd->grant_table->lock);
252 /* Bounds check on the grant ref */
253 if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
254 PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);
256 act = &active_entry(rd->grant_table, op->ref);
257 sha = &shared_entry(rd->grant_table, op->ref);
259 /* If already pinned, check the active domid and avoid refcnt overflow. */
/* 0x80808080 = top bit of each of the four 8-bit GNTPIN subcounters. */
260 if ( act->pin &&
261 ((act->domid != ld->domain_id) ||
262 (act->pin & 0x80808080U) != 0) )
263 PIN_FAIL(unlock_out, GNTST_general_error,
264 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
265 act->domid, ld->domain_id, act->pin);
/* Need to (re)negotiate flags with the guest unless an adequate pin exists. */
267 if ( !act->pin ||
268 (!(op->flags & GNTMAP_readonly) &&
269 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
270 {
271 scombo.word = *(u32 *)&sha->flags;
273 /*
274 * This loop attempts to set the access (reading/writing) flags
275 * in the grant table entry. It tries a cmpxchg on the field
276 * up to five times, and then fails under the assumption that
277 * the guest is misbehaving.
278 */
279 for ( ; ; )
280 {
281 /* If not already pinned, check the grant domid and type. */
282 if ( !act->pin &&
283 (((scombo.shorts.flags & GTF_type_mask) !=
284 GTF_permit_access) ||
285 (scombo.shorts.domid != ld->domain_id)) )
286 PIN_FAIL(unlock_out, GNTST_general_error,
287 "Bad flags (%x) or dom (%d). (expected dom %d)\n",
288 scombo.shorts.flags, scombo.shorts.domid,
289 ld->domain_id);
291 new_scombo = scombo;
292 new_scombo.shorts.flags |= GTF_reading;
294 if ( !(op->flags & GNTMAP_readonly) )
295 {
296 new_scombo.shorts.flags |= GTF_writing;
297 if ( unlikely(scombo.shorts.flags & GTF_readonly) )
298 PIN_FAIL(unlock_out, GNTST_general_error,
299 "Attempt to write-pin a r/o grant entry.\n");
300 }
302 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
303 scombo.word, new_scombo.word);
304 if ( likely(prev_scombo.word == scombo.word) )
305 break;
307 if ( retries++ == 4 )
308 PIN_FAIL(unlock_out, GNTST_general_error,
309 "Shared grant entry is unstable.\n");
311 scombo = prev_scombo;
312 }
/* First pin of this entry: latch the granting domid and frame. */
314 if ( !act->pin )
315 {
316 act->domid = scombo.shorts.domid;
317 act->frame = gmfn_to_mfn(rd, sha->frame);
318 }
319 }
/* Account the new pin(s) in the appropriate GNTPIN subcounter(s). */
321 if ( op->flags & GNTMAP_device_map )
322 act->pin += (op->flags & GNTMAP_readonly) ?
323 GNTPIN_devr_inc : GNTPIN_devw_inc;
324 if ( op->flags & GNTMAP_host_map )
325 act->pin += (op->flags & GNTMAP_readonly) ?
326 GNTPIN_hstr_inc : GNTPIN_hstw_inc;
328 frame = act->frame;
330 cache_flags = (sha->flags & (GTF_PAT | GTF_PWT | GTF_PCD) );
/* Drop the lock before the (potentially slow) mapping work below. */
332 spin_unlock(&rd->grant_table->lock);
334 if ( is_iomem_page(frame) )
335 {
336 if ( !iomem_access_permitted(rd, frame, frame) )
337 {
338 gdprintk(XENLOG_WARNING,
339 "Iomem mapping not permitted %lx (domain %d)\n",
340 frame, rd->domain_id);
341 rc = GNTST_general_error;
342 goto undo_out;
343 }
345 rc = create_grant_host_mapping(
346 op->host_addr, frame, op->flags, cache_flags);
347 if ( rc != GNTST_okay )
348 goto undo_out;
349 }
350 else
351 {
/* Normal RAM: take a page ref (and writable type ref if mapping r/w). */
352 if ( unlikely(!mfn_valid(frame)) ||
353 unlikely(!((op->flags & GNTMAP_readonly) ?
354 get_page(mfn_to_page(frame), rd) :
355 get_page_and_type(mfn_to_page(frame), rd,
356 PGT_writable_page))) )
357 {
358 if ( !rd->is_dying )
359 gdprintk(XENLOG_WARNING, "Could not pin grant frame %lx\n",
360 frame);
361 rc = GNTST_general_error;
362 goto undo_out;
363 }
365 if ( op->flags & GNTMAP_host_map )
366 {
367 rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0);
368 if ( rc != GNTST_okay )
369 {
370 if ( !(op->flags & GNTMAP_readonly) )
371 put_page_type(mfn_to_page(frame));
372 put_page(mfn_to_page(frame));
373 goto undo_out;
374 }
/* device_map in addition to host_map: take a second set of refs. */
376 if ( op->flags & GNTMAP_device_map )
377 {
378 (void)get_page(mfn_to_page(frame), rd);
379 if ( !(op->flags & GNTMAP_readonly) )
380 get_page_type(mfn_to_page(frame), PGT_writable_page);
381 }
382 }
383 }
385 TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);
/* Record the mapping in the local maptrack entry reserved earlier. */
387 mt = &maptrack_entry(ld->grant_table, handle);
388 mt->domid = op->dom;
389 mt->ref = op->ref;
390 mt->flags = op->flags;
392 op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
393 op->handle = handle;
394 op->status = GNTST_okay;
396 rcu_unlock_domain(rd);
397 return;
/* Failure after pins were taken: re-lock and unwind the pin counts. */
399 undo_out:
400 spin_lock(&rd->grant_table->lock);
402 act = &active_entry(rd->grant_table, op->ref);
403 sha = &shared_entry(rd->grant_table, op->ref);
405 if ( op->flags & GNTMAP_device_map )
406 act->pin -= (op->flags & GNTMAP_readonly) ?
407 GNTPIN_devr_inc : GNTPIN_devw_inc;
408 if ( op->flags & GNTMAP_host_map )
409 act->pin -= (op->flags & GNTMAP_readonly) ?
410 GNTPIN_hstr_inc : GNTPIN_hstw_inc;
412 if ( !(op->flags & GNTMAP_readonly) &&
413 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
414 gnttab_clear_flag(_GTF_writing, &sha->flags);
416 if ( !act->pin )
417 gnttab_clear_flag(_GTF_reading, &sha->flags);
419 unlock_out:
420 spin_unlock(&rd->grant_table->lock);
421 op->status = rc;
422 put_maptrack_handle(ld->grant_table, handle);
423 rcu_unlock_domain(rd);
424 }
426 static long
427 gnttab_map_grant_ref(
428 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
429 {
430 int i;
431 struct gnttab_map_grant_ref op;
433 for ( i = 0; i < count; i++ )
434 {
435 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
436 return -EFAULT;
437 __gnttab_map_grant_ref(&op);
438 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
439 return -EFAULT;
440 }
442 return 0;
443 }
/*
 * First phase of unmapping: validate the handle, drop the pin counts,
 * and tear down the host mapping.  Page reference/type counts are NOT
 * released here -- that happens in __gnttab_unmap_common_complete(),
 * after the caller has flushed TLBs.  On success op->rd is left
 * rcu-referenced for the completion phase.
 */
445 static void
446 __gnttab_unmap_common(
447 struct gnttab_unmap_common *op)
448 {
449 domid_t dom;
450 struct domain *ld, *rd;
451 struct active_grant_entry *act;
452 grant_entry_t *sha;
453 s16 rc = 0;
455 ld = current->domain;
457 op->frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);
459 if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
460 {
461 gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
462 op->status = GNTST_bad_handle;
463 return;
464 }
466 op->map = &maptrack_entry(ld->grant_table, op->handle);
/* flags == 0 means the handle is not currently mapped. */
468 if ( unlikely(!op->map->flags) )
469 {
470 gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
471 op->status = GNTST_bad_handle;
472 return;
473 }
475 dom = op->map->domid;
476 op->flags = op->map->flags;
478 if ( unlikely((op->rd = rd = rcu_lock_domain_by_id(dom)) == NULL) )
479 {
480 /* This can happen when a grant is implicitly unmapped. */
481 gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
482 domain_crash(ld); /* naughty... */
483 return;
484 }
486 rc = xsm_grant_unmapref(ld, rd);
487 if ( rc )
488 {
489 rcu_unlock_domain(rd);
490 op->status = GNTST_permission_denied;
491 return;
492 }
494 TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);
496 spin_lock(&rd->grant_table->lock);
498 act = &active_entry(rd->grant_table, op->map->ref);
499 sha = &shared_entry(rd->grant_table, op->map->ref);
/* dev_bus_addr of 0 means "unmap whatever frame is recorded". */
501 if ( op->frame == 0 )
502 {
503 op->frame = act->frame;
504 }
505 else
506 {
507 if ( unlikely(op->frame != act->frame) )
508 PIN_FAIL(unmap_out, GNTST_general_error,
509 "Bad frame number doesn't match gntref. (%lx != %lx)\n",
510 op->frame, act->frame);
/* Non-zero dev_bus_addr requests removal of the device mapping. */
511 if ( op->flags & GNTMAP_device_map )
512 {
513 ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
514 op->map->flags &= ~GNTMAP_device_map;
515 if ( op->flags & GNTMAP_readonly )
516 act->pin -= GNTPIN_devr_inc;
517 else
518 act->pin -= GNTPIN_devw_inc;
519 }
520 }
522 if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
523 {
/* Replace (or clear, if new_addr == 0) the host PTE for this mapping. */
524 if ( (rc = replace_grant_host_mapping(op->host_addr,
525 op->frame, op->new_addr,
526 op->flags)) < 0 )
527 goto unmap_out;
529 ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
530 op->map->flags &= ~GNTMAP_host_map;
531 if ( op->flags & GNTMAP_readonly )
532 act->pin -= GNTPIN_hstr_inc;
533 else
534 act->pin -= GNTPIN_hstw_inc;
535 }
537 /* If just unmapped a writable mapping, mark as dirtied */
538 if ( !(op->flags & GNTMAP_readonly) )
539 gnttab_mark_dirty(rd, op->frame);
541 unmap_out:
542 op->status = rc;
543 spin_unlock(&rd->grant_table->lock);
544 rcu_unlock_domain(rd);
545 }
/*
 * Second phase of unmapping, run after the caller's TLB flush: release
 * the page reference/type counts taken at map time, recycle the
 * maptrack handle once both map types are gone, and clear the shared
 * entry's GTF_writing/GTF_reading flags when the last pin drops.
 */
547 static void
548 __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
549 {
550 struct domain *ld, *rd;
551 struct active_grant_entry *act;
552 grant_entry_t *sha;
554 rd = op->rd;
556 if ( rd == NULL )
557 {
558 /*
559 * Suggests that __gntab_unmap_common failed in
560 * rcu_lock_domain_by_id() or earlier, and so we have nothing
561 * to complete
562 */
563 return;
564 }
566 ld = current->domain;
568 rcu_lock_domain(rd);
569 spin_lock(&rd->grant_table->lock);
571 act = &active_entry(rd->grant_table, op->map->ref);
572 sha = &shared_entry(rd->grant_table, op->map->ref);
574 if ( unlikely(op->frame != act->frame) )
575 {
576 /*
577 * Suggests that __gntab_unmap_common failed early and so
578 * nothing further to do
579 */
580 goto unmap_out;
581 }
/* Drop the extra page (+type) refs taken for a device mapping. */
583 if ( op->flags & GNTMAP_device_map )
584 {
585 if ( !is_iomem_page(act->frame) )
586 {
587 if ( op->flags & GNTMAP_readonly )
588 put_page(mfn_to_page(op->frame));
589 else
590 put_page_and_type(mfn_to_page(op->frame));
591 }
592 }
594 if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
595 {
596 if ( op->status != 0 )
597 {
598 /*
599 * Suggests that __gntab_unmap_common failed in
600 * replace_grant_host_mapping() so nothing further to do
601 */
602 goto unmap_out;
603 }
/* Drop the refs taken for the host mapping. */
605 if ( !is_iomem_page(op->frame) )
606 {
607 if ( !(op->flags & GNTMAP_readonly) )
608 put_page_type(mfn_to_page(op->frame));
609 put_page(mfn_to_page(op->frame));
610 }
611 }
/* Both map types gone: free the local maptrack slot. */
613 if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
614 {
615 op->map->flags = 0;
616 put_maptrack_handle(ld->grant_table, op->handle);
617 }
/* Last writable / last overall pin: clear the shared-entry flags. */
619 if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
620 !(op->flags & GNTMAP_readonly) )
621 gnttab_clear_flag(_GTF_writing, &sha->flags);
623 if ( act->pin == 0 )
624 gnttab_clear_flag(_GTF_reading, &sha->flags);
626 unmap_out:
627 spin_unlock(&rd->grant_table->lock);
628 rcu_unlock_domain(rd);
629 }
/* Translate a gnttab_unmap_grant_ref op into the common unmap state. */
631 static void
632 __gnttab_unmap_grant_ref(
633 struct gnttab_unmap_grant_ref *op,
634 struct gnttab_unmap_common *common)
635 {
636 common->host_addr = op->host_addr;
637 common->dev_bus_addr = op->dev_bus_addr;
638 common->handle = op->handle;
640 /* Initialise these in case common contains old state */
641 common->new_addr = 0;
642 common->rd = NULL;
644 __gnttab_unmap_common(common);
645 op->status = common->status;
646 }
/*
 * GNTTAB_unmap_grant_ref batch handler.  Ops are processed in batches
 * of GNTTAB_UNMAP_BATCH_SIZE: phase one for each op, one TLB flush for
 * the batch, then the completion phase for each op.  On a guest-copy
 * fault, the ops already through phase one are still flushed and
 * completed before returning -EFAULT.
 */
649 static long
650 gnttab_unmap_grant_ref(
651 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
652 {
653 int i, c, partial_done, done = 0;
654 struct gnttab_unmap_grant_ref op;
655 struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
657 while ( count != 0 )
658 {
659 c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
660 partial_done = 0;
662 for ( i = 0; i < c; i++ )
663 {
664 if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
665 goto fault;
666 __gnttab_unmap_grant_ref(&op, &(common[i]));
667 ++partial_done;
668 if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
669 goto fault;
670 }
/* One flush covers the whole batch before refs are finally dropped. */
672 flush_tlb_mask(current->domain->domain_dirty_cpumask);
674 for ( i = 0; i < partial_done; i++ )
675 __gnttab_unmap_common_complete(&(common[i]));
677 count -= c;
678 done += c;
679 }
681 return 0;
/* Fault path: still flush and complete the ops already unmapped. */
683 fault:
684 flush_tlb_mask(current->domain->domain_dirty_cpumask);
686 for ( i = 0; i < partial_done; i++ )
687 __gnttab_unmap_common_complete(&(common[i]));
688 return -EFAULT;
689 }
/* Translate a gnttab_unmap_and_replace op into the common unmap state. */
691 static void
692 __gnttab_unmap_and_replace(
693 struct gnttab_unmap_and_replace *op,
694 struct gnttab_unmap_common *common)
695 {
696 common->host_addr = op->host_addr;
697 common->new_addr = op->new_addr;
698 common->handle = op->handle;
700 /* Initialise these in case common contains old state */
701 common->dev_bus_addr = 0;
702 common->rd = NULL;
704 __gnttab_unmap_common(common);
705 op->status = common->status;
706 }
/*
 * GNTTAB_unmap_and_replace batch handler.  Identical batching/flush
 * structure to gnttab_unmap_grant_ref(), but each op atomically
 * replaces the grant mapping at host_addr with the PTE at new_addr.
 */
708 static long
709 gnttab_unmap_and_replace(
710 XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) uop, unsigned int count)
711 {
712 int i, c, partial_done, done = 0;
713 struct gnttab_unmap_and_replace op;
714 struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
716 while ( count != 0 )
717 {
718 c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
719 partial_done = 0;
721 for ( i = 0; i < c; i++ )
722 {
723 if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
724 goto fault;
725 __gnttab_unmap_and_replace(&op, &(common[i]));
726 ++partial_done;
727 if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
728 goto fault;
729 }
/* One flush per batch before the completion phase drops page refs. */
731 flush_tlb_mask(current->domain->domain_dirty_cpumask);
733 for ( i = 0; i < partial_done; i++ )
734 __gnttab_unmap_common_complete(&(common[i]));
736 count -= c;
737 done += c;
738 }
740 return 0;
/* Fault path: still flush and complete the ops already unmapped. */
742 fault:
743 flush_tlb_mask(current->domain->domain_dirty_cpumask);
745 for ( i = 0; i < partial_done; i++ )
746 __gnttab_unmap_common_complete(&(common[i]));
747 return -EFAULT;
748 }
/*
 * Grow domain d's grant table to req_nr_frames shared frames (plus the
 * matching number of active frames).  Returns 1 on success, 0 on
 * allocation failure (in which case all partially-allocated frames are
 * freed again).  Caller must hold d's grant table lock.
 */
750 int
751 gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
752 {
753 /* d's grant table lock must be held by the caller */
755 struct grant_table *gt = d->grant_table;
756 unsigned int i;
758 ASSERT(req_nr_frames <= max_nr_grant_frames);
760 gdprintk(XENLOG_INFO,
761 "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
762 d->domain_id, nr_grant_frames(gt), req_nr_frames);
764 /* Active */
765 for ( i = nr_active_grant_frames(gt);
766 i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
767 {
768 if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
769 goto active_alloc_failed;
770 clear_page(gt->active[i]);
771 }
773 /* Shared */
774 for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
775 {
776 if ( (gt->shared[i] = alloc_xenheap_page()) == NULL )
777 goto shared_alloc_failed;
778 clear_page(gt->shared[i]);
779 }
781 /* Share the new shared frames with the recipient domain */
782 for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
783 gnttab_create_shared_page(d, gt, i);
/* Publish the new size only after everything is allocated and shared. */
785 gt->nr_grant_frames = req_nr_frames;
787 return 1;
/* Unwind in reverse order; the slot that failed to allocate is NULL. */
789 shared_alloc_failed:
790 for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
791 {
792 free_xenheap_page(gt->shared[i]);
793 gt->shared[i] = NULL;
794 }
795 active_alloc_failed:
796 for ( i = nr_active_grant_frames(gt);
797 i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
798 {
799 free_xenheap_page(gt->active[i]);
800 gt->active[i] = NULL;
801 }
802 gdprintk(XENLOG_INFO, "Allocation failure when expanding grant table.\n");
803 return 0;
804 }
/*
 * GNTTAB_setup_table handler: grow the target domain's grant table to
 * op.nr_frames if needed, and return the list of shared-frame gmfns to
 * the guest.  Only a single op (count == 1) is supported.
 */
806 static long
807 gnttab_setup_table(
808 XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
809 {
810 struct gnttab_setup_table op;
811 struct domain *d;
812 int i;
813 unsigned long gmfn;
814 domid_t dom;
816 if ( count != 1 )
817 return -EINVAL;
819 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
820 {
821 gdprintk(XENLOG_INFO, "Fault while reading gnttab_setup_table_t.\n");
822 return -EFAULT;
823 }
825 if ( unlikely(op.nr_frames > max_nr_grant_frames) )
826 {
827 gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
828 " per domain.\n",
829 max_nr_grant_frames);
830 op.status = GNTST_general_error;
831 goto out;
832 }
/* Resolve the target domain; foreign targets require privilege. */
834 dom = op.dom;
835 if ( dom == DOMID_SELF )
836 {
837 d = current->domain;
838 }
839 else {
840 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
841 {
842 gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
843 op.status = GNTST_bad_domain;
844 goto out;
845 }
846 if ( unlikely(!IS_PRIV_FOR(current->domain, d)) ) {
847 op.status = GNTST_permission_denied;
848 goto setup_unlock_out2;
849 }
850 }
852 if ( xsm_grant_setup(current->domain, d) )
853 {
854 rcu_unlock_domain(d);
855 op.status = GNTST_permission_denied;
856 goto out;
857 }
859 spin_lock(&d->grant_table->lock);
/* Grow only; a request smaller than the current size is a no-op. */
861 if ( (op.nr_frames > nr_grant_frames(d->grant_table)) &&
862 !gnttab_grow_table(d, op.nr_frames) )
863 {
864 gdprintk(XENLOG_INFO,
865 "Expand grant table to %d failed. Current: %d Max: %d.\n",
866 op.nr_frames,
867 nr_grant_frames(d->grant_table),
868 max_nr_grant_frames);
869 op.status = GNTST_general_error;
870 goto setup_unlock_out;
871 }
/* Hand the gmfn of each shared frame back to the guest. */
873 op.status = GNTST_okay;
874 for ( i = 0; i < op.nr_frames; i++ )
875 {
876 gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
877 (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
878 }
880 setup_unlock_out:
881 spin_unlock(&d->grant_table->lock);
883 setup_unlock_out2:
884 rcu_unlock_domain(d);
886 out:
887 if ( unlikely(copy_to_guest(uop, &op, 1)) )
888 return -EFAULT;
890 return 0;
891 }
/*
 * GNTTAB_query_size handler: report the target domain's current and
 * maximum number of grant-table frames.  Only count == 1 is supported.
 */
893 static long
894 gnttab_query_size(
895 XEN_GUEST_HANDLE(gnttab_query_size_t) uop, unsigned int count)
896 {
897 struct gnttab_query_size op;
898 struct domain *d;
899 domid_t dom;
900 int rc;
902 if ( count != 1 )
903 return -EINVAL;
905 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
906 {
907 gdprintk(XENLOG_INFO, "Fault while reading gnttab_query_size_t.\n");
908 return -EFAULT;
909 }
/* Resolve the target domain; foreign targets require privilege. */
911 dom = op.dom;
912 if ( dom == DOMID_SELF )
913 {
914 d = current->domain;
915 }
916 else {
917 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
918 {
919 gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
920 op.status = GNTST_bad_domain;
921 goto query_out;
922 }
923 if ( unlikely(!IS_PRIV_FOR(current->domain, d)) ) {
924 op.status = GNTST_permission_denied;
925 goto query_out_unlock;
926 }
927 }
929 rc = xsm_grant_query_size(current->domain, d);
930 if ( rc )
931 {
932 op.status = GNTST_permission_denied;
933 goto query_out_unlock;
934 }
/* Read the sizes under the grant-table lock for a consistent snapshot. */
936 spin_lock(&d->grant_table->lock);
938 op.nr_frames = nr_grant_frames(d->grant_table);
939 op.max_nr_frames = max_nr_grant_frames;
940 op.status = GNTST_okay;
942 spin_unlock(&d->grant_table->lock);
945 query_out_unlock:
946 rcu_unlock_domain(d);
948 query_out:
949 if ( unlikely(copy_to_guest(uop, &op, 1)) )
950 return -EFAULT;
952 return 0;
953 }
955 /*
956 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
957 * ownership of a page frame. If so, lock down the grant entry.
958 */
/*
 * NOTE(extraction): several brace-only lines of the original (e.g.
 * lines 1009/1012-1016 around the retry-failure path, and the closing
 * brace of the function) were dropped by the viewer this file was
 * captured from.  As shown here, the cmpxchg retry loop and the
 * "goto fail" control flow are NOT faithful to the original source --
 * consult the upstream xen/common/grant_table.c before editing.
 */
959 static int
960 gnttab_prepare_for_transfer(
961 struct domain *rd, struct domain *ld, grant_ref_t ref)
962 {
963 struct grant_table *rgt;
964 struct grant_entry *sha;
965 union grant_combo scombo, prev_scombo, new_scombo;
966 int retries = 0;
968 if ( unlikely((rgt = rd->grant_table) == NULL) )
969 {
970 gdprintk(XENLOG_INFO, "Dom %d has no grant table.\n", rd->domain_id);
971 return 0;
972 }
974 spin_lock(&rgt->lock);
976 if ( unlikely(ref >= nr_grant_entries(rd->grant_table)) )
977 {
978 gdprintk(XENLOG_INFO,
979 "Bad grant reference (%d) for transfer to domain(%d).\n",
980 ref, rd->domain_id);
981 goto fail;
982 }
984 sha = &shared_entry(rgt, ref);
986 scombo.word = *(u32 *)&sha->flags;
/* cmpxchg loop: atomically set GTF_transfer_committed, bounded retries. */
988 for ( ; ; )
989 {
990 if ( unlikely(scombo.shorts.flags != GTF_accept_transfer) ||
991 unlikely(scombo.shorts.domid != ld->domain_id) )
992 {
993 gdprintk(XENLOG_INFO, "Bad flags (%x) or dom (%d). "
994 "(NB. expected dom %d)\n",
995 scombo.shorts.flags, scombo.shorts.domid,
996 ld->domain_id);
997 goto fail;
998 }
1000 new_scombo = scombo;
1001 new_scombo.shorts.flags |= GTF_transfer_committed;
1003 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
1004 scombo.word, new_scombo.word);
1005 if ( likely(prev_scombo.word == scombo.word) )
1006 break;
1008 if ( retries++ == 4 )
1010 gdprintk(XENLOG_WARNING, "Shared grant entry is unstable.\n");
1011 goto fail;
1014 scombo = prev_scombo;
1017 spin_unlock(&rgt->lock);
1018 return 1;
1020 fail:
1021 spin_unlock(&rgt->lock);
1022 return 0;
/*
 * GNTTAB_transfer handler: for each op, steal the named page from the
 * calling domain, validate the target's grant entry via
 * gnttab_prepare_for_transfer(), assign the page to the target domain,
 * and publish the new frame in the target's shared grant entry.
 *
 * NOTE(extraction): every brace-only line of the original function was
 * dropped by the viewer this file was captured from, so the block
 * structure shown below is NOT faithful to the original source --
 * consult the upstream xen/common/grant_table.c before editing.
 */
1025 static long
1026 gnttab_transfer(
1027 XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
1029 struct domain *d = current->domain;
1030 struct domain *e;
1031 struct page_info *page;
1032 int i;
1033 grant_entry_t *sha;
1034 struct gnttab_transfer gop;
1035 unsigned long mfn;
1036 unsigned int max_bitsize;
1038 for ( i = 0; i < count; i++ )
1040 /* Read from caller address space. */
1041 if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
1043 gdprintk(XENLOG_INFO, "gnttab_transfer: error reading req %d/%d\n",
1044 i, count);
1045 return -EFAULT;
1048 mfn = gmfn_to_mfn(d, gop.mfn);
1050 /* Check the passed page frame for basic validity. */
1051 if ( unlikely(!mfn_valid(mfn)) )
1053 gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
1054 (unsigned long)gop.mfn);
1055 gop.status = GNTST_bad_page;
1056 goto copyback;
1059 page = mfn_to_page(mfn);
1060 if ( unlikely(is_xen_heap_page(page)) )
1062 gdprintk(XENLOG_INFO, "gnttab_transfer: xen frame %lx\n",
1063 (unsigned long)gop.mfn);
1064 gop.status = GNTST_bad_page;
1065 goto copyback;
/* Remove the page from the caller's ownership before giving it away. */
1068 if ( steal_page(d, page, 0) < 0 )
1070 gop.status = GNTST_bad_page;
1071 goto copyback;
1074 /* Find the target domain. */
1075 if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
1077 gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
1078 gop.domid);
1079 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1080 free_domheap_page(page);
1081 gop.status = GNTST_bad_domain;
1082 goto copyback;
1085 if ( xsm_grant_transfer(d, e) )
1087 gop.status = GNTST_permission_denied;
1088 unlock_and_copyback:
1089 rcu_unlock_domain(e);
1090 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1091 free_domheap_page(page);
1092 goto copyback;
/* If the target can't address this mfn, copy into a reachable page. */
1095 max_bitsize = domain_clamp_alloc_bitsize(
1096 e, BITS_PER_LONG+PAGE_SHIFT-1);
1097 if ( (1UL << (max_bitsize - PAGE_SHIFT)) <= mfn )
1099 struct page_info *new_page;
1100 void *sp, *dp;
1102 new_page = alloc_domheap_pages(NULL, 0, MEMF_bits(max_bitsize));
1103 if ( new_page == NULL )
1105 gop.status = GNTST_address_too_big;
1106 goto unlock_and_copyback;
1109 sp = map_domain_page(mfn);
1110 dp = map_domain_page(page_to_mfn(new_page));
1111 memcpy(dp, sp, PAGE_SIZE);
1112 unmap_domain_page(dp);
1113 unmap_domain_page(sp);
1115 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1116 free_domheap_page(page);
1117 page = new_page;
1120 spin_lock(&e->page_alloc_lock);
1122 /*
1123 * Check that 'e' will accept the page and has reservation
1124 * headroom. Also, a domain mustn't have PGC_allocated
1125 * pages when it is dying.
1126 */
1127 if ( unlikely(e->is_dying) ||
1128 unlikely(e->tot_pages >= e->max_pages) ||
1129 unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
1131 if ( !e->is_dying )
1132 gdprintk(XENLOG_INFO, "gnttab_transfer: "
1133 "Transferee has no reservation "
1134 "headroom (%d,%d) or provided a bad grant ref (%08x) "
1135 "or is dying (%d)\n",
1136 e->tot_pages, e->max_pages, gop.ref, e->is_dying);
1137 spin_unlock(&e->page_alloc_lock);
1138 rcu_unlock_domain(e);
1139 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1140 free_domheap_page(page);
1141 gop.status = GNTST_general_error;
1142 goto copyback;
1145 /* Okay, add the page to 'e'. */
1146 if ( unlikely(e->tot_pages++ == 0) )
1147 get_knownalive_domain(e);
1148 list_add_tail(&page->list, &e->page_list);
1149 page_set_owner(page, e);
1151 spin_unlock(&e->page_alloc_lock);
1153 TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
1155 /* Tell the guest about its new page frame. */
1156 spin_lock(&e->grant_table->lock);
1158 sha = &shared_entry(e->grant_table, gop.ref);
1159 guest_physmap_add_page(e, sha->frame, mfn);
1160 sha->frame = mfn;
/* Frame must be visible before the completion flag (paired with wmb). */
1161 wmb();
1162 sha->flags |= GTF_transfer_completed;
1164 spin_unlock(&e->grant_table->lock);
1166 rcu_unlock_domain(e);
1168 gop.status = GNTST_okay;
1170 copyback:
1171 if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
1173 gdprintk(XENLOG_INFO, "gnttab_transfer: error writing resp "
1174 "%d/%d\n", i, count);
1175 return -EFAULT;
1179 return 0;
1182 /* Undo __acquire_grant_for_copy. Again, this has no effect on page
1183 type and reference counts. */
/*
 * Drops the host pin taken by __acquire_grant_for_copy and clears the
 * shared entry's GTF_writing/GTF_reading flags when the last relevant
 * pin goes away.  Writable releases also mark the frame dirty.
 *
 * NOTE(extraction): brace-only lines of the original were dropped by
 * the viewer this file was captured from; the if/else block structure
 * shown below is NOT faithful to the original source.
 */
1184 static void
1185 __release_grant_for_copy(
1186 struct domain *rd, unsigned long gref, int readonly)
1188 grant_entry_t *sha;
1189 struct active_grant_entry *act;
1190 unsigned long r_frame;
1192 spin_lock(&rd->grant_table->lock);
1194 act = &active_entry(rd->grant_table, gref);
1195 sha = &shared_entry(rd->grant_table, gref);
1196 r_frame = act->frame;
1198 if ( readonly )
1200 act->pin -= GNTPIN_hstr_inc;
1202 else
1204 gnttab_mark_dirty(rd, r_frame);
1206 act->pin -= GNTPIN_hstw_inc;
1207 if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) )
1208 gnttab_clear_flag(_GTF_writing, &sha->flags);
1211 if ( !act->pin )
1212 gnttab_clear_flag(_GTF_reading, &sha->flags);
1214 spin_unlock(&rd->grant_table->lock);
1217 /* Grab a frame number from a grant entry and update the flags and pin
1218 count as appropriate. Note that this does *not* update the page
1219 type or reference counts, and does not check that the mfn is
1220 actually valid. */
1221 static int
1222 __acquire_grant_for_copy(
1223 struct domain *rd, unsigned long gref, int readonly,
1224 unsigned long *frame)
1226 grant_entry_t *sha;
1227 struct active_grant_entry *act;
1228 s16 rc = GNTST_okay;
1229 int retries = 0;
1230 union grant_combo scombo, prev_scombo, new_scombo;
/* Serialise against all other users of rd's grant table. */
1232 spin_lock(&rd->grant_table->lock);
/* Reject references beyond the current size of rd's table. */
1234 if ( unlikely(gref >= nr_grant_entries(rd->grant_table)) )
1235 PIN_FAIL(unlock_out, GNTST_bad_gntref,
1236 "Bad grant reference %ld\n", gref);
1238 act = &active_entry(rd->grant_table, gref);
1239 sha = &shared_entry(rd->grant_table, gref);
1241 /* If already pinned, check the active domid and avoid refcnt overflow. */
/* 0x80808080: top bit of each of the four 8-bit pin sub-counters packed
 * into act->pin; any set bit means a further increment could overflow. */
1242 if ( act->pin &&
1243 ((act->domid != current->domain->domain_id) ||
1244 (act->pin & 0x80808080U) != 0) )
1245 PIN_FAIL(unlock_out, GNTST_general_error,
1246 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
1247 act->domid, current->domain->domain_id, act->pin);
/* Only touch the shared entry's flags when this pin actually changes the
 * externally visible state: first pin, or first writable pin. */
1249 if ( !act->pin ||
1250 (!readonly && !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask))) )
/* Snapshot the granter-writable flags+domid pair; it is updated below
 * with cmpxchg so a concurrent guest update cannot be lost. */
1252 scombo.word = *(u32 *)&sha->flags;
1254 for ( ; ; )
1256 /* If not already pinned, check the grant domid and type. */
1257 if ( !act->pin &&
1258 (((scombo.shorts.flags & GTF_type_mask) !=
1259 GTF_permit_access) ||
1260 (scombo.shorts.domid != current->domain->domain_id)) )
1261 PIN_FAIL(unlock_out, GNTST_general_error,
1262 "Bad flags (%x) or dom (%d). (expected dom %d)\n",
1263 scombo.shorts.flags, scombo.shorts.domid,
1264 current->domain->domain_id);
1266 new_scombo = scombo;
1267 new_scombo.shorts.flags |= GTF_reading;
1269 if ( !readonly )
1271 new_scombo.shorts.flags |= GTF_writing;
/* A write pin is refused if the granter marked the entry read-only. */
1272 if ( unlikely(scombo.shorts.flags & GTF_readonly) )
1273 PIN_FAIL(unlock_out, GNTST_general_error,
1274 "Attempt to write-pin a r/o grant entry.\n");
/* Publish GTF_reading/GTF_writing only if the entry is unchanged. */
1277 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
1278 scombo.word, new_scombo.word);
1279 if ( likely(prev_scombo.word == scombo.word) )
1280 break;
/* Bounded retry: give up if the guest keeps flipping the entry. */
1282 if ( retries++ == 4 )
1283 PIN_FAIL(unlock_out, GNTST_general_error,
1284 "Shared grant entry is unstable.\n");
1286 scombo = prev_scombo;
/* First pin: latch the granter-chosen domid and resolve the granted
 * gmfn to an mfn.  NOTE(review): sha->frame is re-read here after the
 * cmpxchg above; this assumes the guest does not change it meanwhile
 * -- confirm this is acceptable. */
1289 if ( !act->pin )
1291 act->domid = scombo.shorts.domid;
1292 act->frame = gmfn_to_mfn(rd, sha->frame);
/* Account this pin as a host read or host write reference. */
1296 act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;
1298 *frame = act->frame;
1300 unlock_out:
1301 spin_unlock(&rd->grant_table->lock);
1302 return rc;
/* Perform one grant copy operation: resolve source and destination (by
 * grant reference or, for DOMID_SELF, by gmfn), take the necessary page
 * references, copy op->len bytes, and report the result in op->status. */
1305 static void
1306 __gnttab_copy(
1307 struct gnttab_copy *op)
1309 struct domain *sd = NULL, *dd = NULL;
1310 unsigned long s_frame, d_frame;
1311 char *sp, *dp;
1312 s16 rc = GNTST_okay;
/* Cleanup flags: which resources the error path must release. */
1313 int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
1314 int src_is_gref, dest_is_gref;
/* Both the source and destination ranges must lie within one page. */
1316 if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
1317 ((op->dest.offset + op->len) > PAGE_SIZE) )
1318 PIN_FAIL(error_out, GNTST_bad_copy_arg, "copy beyond page area.\n");
1320 src_is_gref = op->flags & GNTCOPY_source_gref;
1321 dest_is_gref = op->flags & GNTCOPY_dest_gref;
/* Raw-mfn addressing is a privilege only permitted on our own pages. */
1323 if ( (op->source.domid != DOMID_SELF && !src_is_gref ) ||
1324 (op->dest.domid != DOMID_SELF && !dest_is_gref) )
1325 PIN_FAIL(error_out, GNTST_permission_denied,
1326 "only allow copy-by-mfn for DOMID_SELF.\n");
/* Take RCU references on the two domains involved. */
1328 if ( op->source.domid == DOMID_SELF )
1329 sd = rcu_lock_current_domain();
1330 else if ( (sd = rcu_lock_domain_by_id(op->source.domid)) == NULL )
1331 PIN_FAIL(error_out, GNTST_bad_domain,
1332 "couldn't find %d\n", op->source.domid);
1334 if ( op->dest.domid == DOMID_SELF )
1335 dd = rcu_lock_current_domain();
1336 else if ( (dd = rcu_lock_domain_by_id(op->dest.domid)) == NULL )
1337 PIN_FAIL(error_out, GNTST_bad_domain,
1338 "couldn't find %d\n", op->dest.domid);
/* Security-module hook; any xsm error is reported as permission denied. */
1340 rc = xsm_grant_copy(sd, dd);
1341 if ( rc )
1343 rc = GNTST_permission_denied;
1344 goto error_out;
/* Resolve the source frame: via a (read-only) grant pin, or directly. */
1347 if ( src_is_gref )
1349 rc = __acquire_grant_for_copy(sd, op->source.u.ref, 1, &s_frame);
1350 if ( rc != GNTST_okay )
1351 goto error_out;
1352 have_s_grant = 1;
1354 else
1356 s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
1358 if ( unlikely(!mfn_valid(s_frame)) )
1359 PIN_FAIL(error_out, GNTST_general_error,
1360 "source frame %lx invalid.\n", s_frame);
/* Hold a general reference on the source page for the duration. */
1361 if ( !get_page(mfn_to_page(s_frame), sd) )
/* Quietly tolerate failure for a dying domain; warn otherwise. */
1363 if ( !sd->is_dying )
1364 gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
1365 rc = GNTST_general_error;
1366 goto error_out;
1368 have_s_ref = 1;
/* Resolve the destination frame: via a writable grant pin, or directly. */
1370 if ( dest_is_gref )
1372 rc = __acquire_grant_for_copy(dd, op->dest.u.ref, 0, &d_frame);
1373 if ( rc != GNTST_okay )
1374 goto error_out;
1375 have_d_grant = 1;
1377 else
1379 d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
1381 if ( unlikely(!mfn_valid(d_frame)) )
1382 PIN_FAIL(error_out, GNTST_general_error,
1383 "destination frame %lx invalid.\n", d_frame);
/* The destination must be pinned writable for the memcpy below. */
1384 if ( !get_page_and_type(mfn_to_page(d_frame), dd, PGT_writable_page) )
1386 if ( !dd->is_dying )
1387 gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
1388 rc = GNTST_general_error;
1389 goto error_out;
/* NOTE(review): memcpy assumes the two ranges do not overlap; if a guest
 * names the same frame as source and destination with overlapping
 * offsets this is undefined behaviour -- confirm whether memmove is
 * needed here. */
1392 sp = map_domain_page(s_frame);
1393 dp = map_domain_page(d_frame);
1395 memcpy(dp + op->dest.offset, sp + op->source.offset, op->len);
1397 unmap_domain_page(dp);
1398 unmap_domain_page(sp);
1400 gnttab_mark_dirty(dd, d_frame);
/* Success path drops the writable type/ref pair, then falls through to
 * the common cleanup below. */
1402 put_page_and_type(mfn_to_page(d_frame));
1403 error_out:
1404 if ( have_s_ref )
1405 put_page(mfn_to_page(s_frame));
1406 if ( have_s_grant )
1407 __release_grant_for_copy(sd, op->source.u.ref, 1);
1408 if ( have_d_grant )
1409 __release_grant_for_copy(dd, op->dest.u.ref, 0);
1410 if ( sd )
1411 rcu_unlock_domain(sd);
1412 if ( dd )
1413 rcu_unlock_domain(dd);
1414 op->status = rc;
1417 static long
1418 gnttab_copy(
1419 XEN_GUEST_HANDLE(gnttab_copy_t) uop, unsigned int count)
1421 int i;
1422 struct gnttab_copy op;
1424 for ( i = 0; i < count; i++ )
1426 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
1427 return -EFAULT;
1428 __gnttab_copy(&op);
1429 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
1430 return -EFAULT;
1432 return 0;
/* Grant-table hypercall entry point: validate the guest buffer for the
 * requested sub-command and dispatch to the matching handler. */
1435 long
1436 do_grant_table_op(
1437 unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
1439 long rc;
1440 struct domain *d = current->domain;
/* Cap the batch size -- presumably to bound time spent per hypercall;
 * TODO confirm the rationale for 512. */
1442 if ( count > 512 )
1443 return -EINVAL;
/* Per-domain big lock held across the whole operation. */
1445 LOCK_BIGLOCK(d);
/* Default result if a guest-handle check sends us to 'out'. */
1447 rc = -EFAULT;
1448 switch ( cmd )
1450 case GNTTABOP_map_grant_ref:
1452 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
1453 guest_handle_cast(uop, gnttab_map_grant_ref_t);
1454 if ( unlikely(!guest_handle_okay(map, count)) )
1455 goto out;
1456 rc = gnttab_map_grant_ref(map, count);
1457 break;
1459 case GNTTABOP_unmap_grant_ref:
1461 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
1462 guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
1463 if ( unlikely(!guest_handle_okay(unmap, count)) )
1464 goto out;
1465 rc = gnttab_unmap_grant_ref(unmap, count);
1466 break;
1468 case GNTTABOP_unmap_and_replace:
1470 XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) unmap =
1471 guest_handle_cast(uop, gnttab_unmap_and_replace_t);
1472 if ( unlikely(!guest_handle_okay(unmap, count)) )
1473 goto out;
/* Architecture may not support replace; report -ENOSYS then. */
1474 rc = -ENOSYS;
1475 if ( unlikely(!replace_grant_supported()) )
1476 goto out;
1477 rc = gnttab_unmap_and_replace(unmap, count);
1478 break;
1480 case GNTTABOP_setup_table:
1482 rc = gnttab_setup_table(
1483 guest_handle_cast(uop, gnttab_setup_table_t), count);
1484 break;
1486 case GNTTABOP_transfer:
1488 XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
1489 guest_handle_cast(uop, gnttab_transfer_t);
1490 if ( unlikely(!guest_handle_okay(transfer, count)) )
1491 goto out;
1492 rc = gnttab_transfer(transfer, count);
1493 break;
1495 case GNTTABOP_copy:
1497 XEN_GUEST_HANDLE(gnttab_copy_t) copy =
1498 guest_handle_cast(uop, gnttab_copy_t);
1499 if ( unlikely(!guest_handle_okay(copy, count)) )
1500 goto out;
1501 rc = gnttab_copy(copy, count);
1502 break;
1504 case GNTTABOP_query_size:
1506 rc = gnttab_query_size(
1507 guest_handle_cast(uop, gnttab_query_size_t), count);
1508 break;
1510 default:
1511 rc = -ENOSYS;
1512 break;
1515 out:
1516 UNLOCK_BIGLOCK(d);
1518 return rc;
1521 #ifdef CONFIG_COMPAT
1522 #include "compat/grant_table.c"
1523 #endif
1525 static unsigned int max_nr_active_grant_frames(void)
1527 return (((max_nr_grant_frames * (PAGE_SIZE / sizeof(grant_entry_t))) +
1528 ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
1529 / (PAGE_SIZE / sizeof(struct active_grant_entry)));
/* Allocate and initialise domain d's grant table: the active-entry
 * frames, the maptrack (foreign-mapping tracking) frames and the shared
 * frames visible to the guest.  Returns 0 on success, -ENOMEM on any
 * allocation failure (everything allocated so far is freed). */
1532 int
1533 grant_table_create(
1534 struct domain *d)
1536 struct grant_table *t;
1537 int i;
1539 /* If this sizeof assertion fails, fix the function: shared_index */
1540 ASSERT(sizeof(grant_entry_t) == 8);
1542 if ( (t = xmalloc(struct grant_table)) == NULL )
1543 goto no_mem_0;
1545 /* Simple stuff. */
1546 memset(t, 0, sizeof(*t));
1547 spin_lock_init(&t->lock);
1548 t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
1550 /* Active grant table. */
/* Pointer array is sized for the maximum; only the initial frames are
 * populated now, the rest on demand. */
1551 if ( (t->active = xmalloc_array(struct active_grant_entry *,
1552 max_nr_active_grant_frames())) == NULL )
1553 goto no_mem_1;
1554 memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
1555 for ( i = 0;
1556 i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
1558 if ( (t->active[i] = alloc_xenheap_page()) == NULL )
1559 goto no_mem_2;
1560 clear_page(t->active[i]);
1563 /* Tracking of mapped foreign frames table */
1564 if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
1565 max_nr_maptrack_frames())) == NULL )
1566 goto no_mem_2;
1567 memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
1568 if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
1569 goto no_mem_3;
1570 clear_page(t->maptrack[0]);
1571 t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
/* Thread the unused maptrack entries into a free list via .ref. */
1572 for ( i = 0; i < t->maptrack_limit; i++ )
1573 t->maptrack[0][i].ref = i+1;
1575 /* Shared grant table. */
1576 if ( (t->shared = xmalloc_array(struct grant_entry *,
1577 max_nr_grant_frames)) == NULL )
1578 goto no_mem_3;
1579 memset(t->shared, 0, max_nr_grant_frames * sizeof(t->shared[0]));
1580 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1582 if ( (t->shared[i] = alloc_xenheap_page()) == NULL )
1583 goto no_mem_4;
1584 clear_page(t->shared[i]);
/* Make the shared frames mappable by the guest. */
1587 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1588 gnttab_create_shared_page(d, t, i);
1590 /* Okay, install the structure. */
1591 d->grant_table = t;
1592 return 0;
/* Error unwind: each label frees everything allocated before its goto.
 * free_xenheap_page()/xfree() tolerate NULL, so partially filled arrays
 * are safe to walk in full. */
1594 no_mem_4:
1595 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1596 free_xenheap_page(t->shared[i]);
1597 xfree(t->shared);
1598 no_mem_3:
1599 free_xenheap_page(t->maptrack[0]);
1600 xfree(t->maptrack);
1601 no_mem_2:
1602 for ( i = 0;
1603 i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
1604 free_xenheap_page(t->active[i]);
1605 xfree(t->active);
1606 no_mem_1:
1607 xfree(t);
1608 no_mem_0:
1609 return -ENOMEM;
/* Called while d is dying: walk d's maptrack table and undo every
 * outstanding grant mapping d still holds on other domains' pages,
 * releasing the corresponding pins and page references. */
1612 void
1613 gnttab_release_mappings(
1614 struct domain *d)
1616 struct grant_table *gt = d->grant_table;
1617 struct grant_mapping *map;
1618 grant_ref_t ref;
1619 grant_handle_t handle;
1620 struct domain *rd;
1621 struct active_grant_entry *act;
1622 struct grant_entry *sha;
/* Only legal on a dying domain: no new mappings can race with us. */
1624 BUG_ON(!d->is_dying);
1626 for ( handle = 0; handle < gt->maptrack_limit; handle++ )
1628 map = &maptrack_entry(gt, handle);
/* Skip entries that do not carry a live device or host mapping. */
1629 if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
1630 continue;
1632 ref = map->ref;
1634 gdprintk(XENLOG_INFO, "Grant release (%hu) ref:(%hu) "
1635 "flags:(%x) dom:(%hu)\n",
1636 handle, ref, map->flags, map->domid);
1638 rd = rcu_lock_domain_by_id(map->domid);
/* Granting domain already gone: no remote state left to clean. */
1639 if ( rd == NULL )
1641 /* Nothing to clear up... */
1642 map->flags = 0;
1643 continue;
/* The remote table's lock protects its active and shared entries. */
1646 spin_lock(&rd->grant_table->lock);
1648 act = &active_entry(rd->grant_table, ref);
1649 sha = &shared_entry(rd->grant_table, ref);
/* Drop one pin per mapping type, matching how it was acquired:
 * read-only mappings took plain refs, writable ones took type refs.
 * I/O-memory frames never took a page reference, so none is put. */
1651 if ( map->flags & GNTMAP_readonly )
1653 if ( map->flags & GNTMAP_device_map )
1655 BUG_ON(!(act->pin & GNTPIN_devr_mask));
1656 act->pin -= GNTPIN_devr_inc;
1657 if ( !is_iomem_page(act->frame) )
1658 put_page(mfn_to_page(act->frame));
1661 if ( map->flags & GNTMAP_host_map )
1663 BUG_ON(!(act->pin & GNTPIN_hstr_mask));
1664 act->pin -= GNTPIN_hstr_inc;
1665 if ( !is_iomem_page(act->frame) )
1666 gnttab_release_put_page(mfn_to_page(act->frame));
1669 else
1671 if ( map->flags & GNTMAP_device_map )
1673 BUG_ON(!(act->pin & GNTPIN_devw_mask));
1674 act->pin -= GNTPIN_devw_inc;
1675 if ( !is_iomem_page(act->frame) )
1676 put_page_and_type(mfn_to_page(act->frame));
1679 if ( map->flags & GNTMAP_host_map )
1681 BUG_ON(!(act->pin & GNTPIN_hstw_mask));
1682 act->pin -= GNTPIN_hstw_inc;
1683 if ( !is_iomem_page(act->frame) )
1684 gnttab_release_put_page_and_type(mfn_to_page(act->frame));
/* Mirror __release_grant_for_copy: clear the shared-entry status flags
 * once the last writable / last overall pin has been dropped. */
1687 if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
1688 gnttab_clear_flag(_GTF_writing, &sha->flags);
1691 if ( act->pin == 0 )
1692 gnttab_clear_flag(_GTF_reading, &sha->flags);
1694 spin_unlock(&rd->grant_table->lock);
1696 rcu_unlock_domain(rd);
/* Mark the maptrack entry dead. */
1698 map->flags = 0;
1703 void
1704 grant_table_destroy(
1705 struct domain *d)
1707 struct grant_table *t = d->grant_table;
1708 int i;
1710 if ( t == NULL )
1711 return;
1713 for ( i = 0; i < nr_grant_frames(t); i++ )
1714 free_xenheap_page(t->shared[i]);
1715 xfree(t->shared);
1717 for ( i = 0; i < nr_maptrack_frames(t); i++ )
1718 free_xenheap_page(t->maptrack[i]);
1719 xfree(t->maptrack);
1721 for ( i = 0; i < nr_active_grant_frames(t); i++ )
1722 free_xenheap_page(t->active[i]);
1723 xfree(t->active);
1725 xfree(t);
1726 d->grant_table = NULL;
1729 /*
1730 * Local variables:
1731 * mode: C
1732 * c-set-style: "BSD"
1733 * c-basic-offset: 4
1734 * tab-width: 4
1735 * indent-tabs-mode: nil
1736 * End:
1737 */