ia64/xen-unstable

view xen/common/grant_table.c @ 19835:edfdeb150f27

Fix buildsystem to detect udev > version 124

udev removed the udevinfo symlink in versions higher than 123, so
xen's build system could not detect whether udev was present and had
the required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100 (2009-06-25)
parents 2e83c670f680
children
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005-2006 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/config.h>
28 #include <xen/iocap.h>
29 #include <xen/lib.h>
30 #include <xen/sched.h>
31 #include <xen/mm.h>
32 #include <xen/trace.h>
33 #include <xen/guest_access.h>
34 #include <xen/domain_page.h>
35 #include <xen/iommu.h>
36 #include <xen/paging.h>
37 #include <xsm/xsm.h>
#ifndef max_nr_grant_frames
/*
 * Upper bound on grant-table frames per domain; tunable at boot via the
 * "gnttab_max_nr_frames" command-line option unless the architecture
 * defines max_nr_grant_frames itself.
 */
unsigned int max_nr_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
#endif

/* The maximum number of grant mappings is defined as a multiplier of the
 * maximum number of grant table entries. This defines the multiplier used.
 * Pretty arbitrary. [POLICY]
 */
#define MAX_MAPTRACK_TO_GRANTS_RATIO 8
/*
 * The first two members of a grant entry are updated as a combined pair.
 * The following union allows that to happen in an endian-neutral fashion.
 * The 'word' view is what gets passed to cmpxchg().
 */
union grant_combo {
    uint32_t word;      /* both halves as one 32-bit value, for atomic update */
    struct {
        uint16_t flags; /* GTF_* flags half of the pair */
        domid_t domid;  /* domain-id half of the pair */
    } shorts;
};
/* Used to share code between unmap_grant_ref and unmap_and_replace. */
struct gnttab_unmap_common {
    /* Input */
    uint64_t host_addr;        /* guest address of the mapping (or pte address) */
    uint64_t dev_bus_addr;     /* device bus address, if device-mapped */
    uint64_t new_addr;         /* replacement address (unmap_and_replace only) */
    grant_handle_t handle;     /* maptrack handle identifying the mapping */

    /* Return */
    int16_t status;            /* GNTST_* code reported back to the guest */

    /* Shared state between *_unmap and *_unmap_complete */
    u16 flags;                 /* snapshot of the maptrack entry's GNTMAP_* flags */
    unsigned long frame;       /* machine frame that was mapped */
    struct grant_mapping *map; /* the maptrack entry being torn down */
    struct domain *rd;         /* remote (granting) domain; NULL if lookup failed */
};

/* Number of unmap operations that are done between each tlb flush */
#define GNTTAB_UNMAP_BATCH_SIZE 32
/*
 * Log a warning, set the local variable 'rc' to the given status, and
 * jump to the given error label.  Relies on 'rc' being in scope at every
 * use site.
 */
#define PIN_FAIL(_lbl, _rc, _f, _a...)          \
    do {                                        \
        gdprintk(XENLOG_WARNING, _f, ## _a );   \
        rc = (_rc);                             \
        goto _lbl;                              \
    } while ( 0 )

/* Maptrack entries per page, and 2-D (page, slot) indexing of the table. */
#define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
#define maptrack_entry(t, e) \
    ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
95 static inline unsigned int
96 nr_maptrack_frames(struct grant_table *t)
97 {
98 return t->maptrack_limit / MAPTRACK_PER_PAGE;
99 }
101 static unsigned inline int max_nr_maptrack_frames(void)
102 {
103 return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
104 }
/* Shared grant entries per page, and 2-D (page, slot) indexing. */
#define SHGNT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_t))
#define shared_entry(t, e) \
    ((t)->shared[(e)/SHGNT_PER_PAGE][(e)%SHGNT_PER_PAGE])
/* Active grant entries per page, and 2-D (page, slot) indexing. */
#define ACGNT_PER_PAGE (PAGE_SIZE / sizeof(struct active_grant_entry))
#define active_entry(t, e) \
    ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
/*
 * Pop a free handle off the maptrack free list, or return -1 if the list
 * is exhausted.  Free entries are chained through their 'ref' fields, and
 * (maptrack_limit - 1) acts as the end-of-list sentinel.
 * NOTE(review): callers invoke this both with and without lgt->lock held
 * (see get_maptrack_handle) — confirm the lock-free path is intended.
 */
static inline int
__get_maptrack_handle(
    struct grant_table *t)
{
    unsigned int h;
    /* Sentinel reached => free list empty. */
    if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
        return -1;
    t->maptrack_head = maptrack_entry(t, h).ref;
    return h;
}
125 static inline void
126 put_maptrack_handle(
127 struct grant_table *t, int handle)
128 {
129 maptrack_entry(t, handle).ref = t->maptrack_head;
130 t->maptrack_head = handle;
131 }
/*
 * Allocate a maptrack handle from the local domain's grant table,
 * growing the maptrack array by one xenheap page if the free list is
 * empty.  Returns -1 if the table is already at its maximum size or the
 * page allocation fails.
 */
static inline int
get_maptrack_handle(
    struct grant_table *lgt)
{
    int                   i;
    grant_handle_t        handle;
    struct grant_mapping *new_mt;
    unsigned int          new_mt_limit, nr_frames;

    /* First attempt is made without the lock (see __get_maptrack_handle). */
    if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
    {
        spin_lock(&lgt->lock);

        /* Retry under the lock: another vcpu may have grown the table. */
        if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
        {
            nr_frames = nr_maptrack_frames(lgt);
            if ( nr_frames >= max_nr_maptrack_frames() )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            new_mt = alloc_xenheap_page();
            if ( new_mt == NULL )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            clear_page(new_mt);

            new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;

            /*
             * Thread the new entries onto the free list: entry i points at
             * i+1, so the last new entry (new_mt_limit - 1) becomes the
             * end-of-list sentinel expected by __get_maptrack_handle().
             */
            for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
            {
                new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
                new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
            }

            lgt->maptrack[nr_frames] = new_mt;
            lgt->maptrack_limit = new_mt_limit;

            gdprintk(XENLOG_INFO,
                     "Increased maptrack size to %u frames.\n", nr_frames + 1);
            handle = __get_maptrack_handle(lgt);
        }

        spin_unlock(&lgt->lock);
    }
    return handle;
}
/*
 * Map the grant entry (op->dom, op->ref) into the calling domain as a
 * host mapping, a device mapping, or both, according to op->flags.
 * Results (status / handle / dev_bus_addr) are written back into *op.
 *
 * op->host_addr is _either_ a host virtual address, or the address of
 * the pte to update, as indicated by the GNTMAP_contains_pte flag.
 */
static void
__gnttab_map_grant_ref(
    struct gnttab_map_grant_ref *op)
{
    struct domain *ld, *rd, *owner;
    struct vcpu   *led;
    int            handle;
    unsigned long  frame = 0, nr_gets = 0;
    int            rc = GNTST_okay;
    u32            old_pin;
    u32            act_pin;
    unsigned int   cache_flags;
    struct active_grant_entry *act;
    struct grant_mapping *mt;
    grant_entry_t *sha;
    union grant_combo scombo, prev_scombo, new_scombo;

    /*
     * We bound the number of times we retry CMPXCHG on memory locations that
     * we share with a guest OS. The reason is that the guest can modify that
     * location at a higher rate than we can read-modify-CMPXCHG, so the guest
     * could cause us to livelock. There are a few cases where it is valid for
     * the guest to race our updates (e.g., to change the GTF_readonly flag),
     * so we allow a few retries before failing.
     */
    int retries = 0;

    led = current;
    ld = led->domain;

    /* At least one of device/host mapping must be requested. */
    if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
    {
        gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
        op->status = GNTST_bad_gntref;
        return;
    }

    if ( unlikely((rd = rcu_lock_domain_by_id(op->dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", op->dom);
        op->status = GNTST_bad_domain;
        return;
    }

    rc = xsm_grant_mapref(ld, rd, op->flags);
    if ( rc )
    {
        rcu_unlock_domain(rd);
        op->status = GNTST_permission_denied;
        return;
    }

    /* Reserve a maptrack slot to record this mapping in. */
    if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
    {
        rcu_unlock_domain(rd);
        gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
        op->status = GNTST_no_device_space;
        return;
    }

    spin_lock(&rd->grant_table->lock);

    /* Bounds check on the grant ref */
    if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
        PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    /* If already pinned, check the active domid and avoid refcnt overflow. */
    if ( act->pin &&
         ((act->domid != ld->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, ld->domain_id, act->pin);

    /* Need to (re)validate the shared entry if unpinned, or if we are
     * adding the first writable pin. */
    if ( !act->pin ||
         (!(op->flags & GNTMAP_readonly) &&
          !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /*
         * This loop attempts to set the access (reading/writing) flags
         * in the grant table entry.  It tries a cmpxchg on the field
         * up to five times, and then fails under the assumption that
         * the guest is misbehaving.
         */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != ld->domain_id)) )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                         scombo.shorts.flags, scombo.shorts.domid,
                         ld->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !(op->flags & GNTMAP_readonly) )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        /* First pin: latch the granting domid and target frame. */
        if ( !act->pin )
        {
            act->domid = scombo.shorts.domid;
            act->gfn = sha->frame;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    old_pin = act->pin;
    if ( op->flags & GNTMAP_device_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    frame = act->frame;
    act_pin = act->pin;

    cache_flags = (sha->flags & (GTF_PAT | GTF_PWT | GTF_PCD) );

    spin_unlock(&rd->grant_table->lock);

    /* Invalid frame, or one owned by dom_io: handle as an iomem mapping. */
    if ( !mfn_valid(frame) ||
         (owner = page_get_owner_and_reference(mfn_to_page(frame))) == dom_io )
    {
        /* Only needed the reference to confirm dom_io ownership. */
        if ( mfn_valid(frame) )
            put_page(mfn_to_page(frame));

        if ( !iomem_access_permitted(rd, frame, frame) )
        {
            gdprintk(XENLOG_WARNING,
                     "Iomem mapping not permitted %lx (domain %d)\n",
                     frame, rd->domain_id);
            rc = GNTST_general_error;
            goto undo_out;
        }

        rc = create_grant_host_mapping(
            op->host_addr, frame, op->flags, cache_flags);
        if ( rc != GNTST_okay )
            goto undo_out;
    }
    else if ( owner == rd )
    {
        /* Writable host mappings also need the page's type pinned. */
        if ( gnttab_host_mapping_get_page_type(op, ld, rd) &&
             !get_page_type(mfn_to_page(frame), PGT_writable_page) )
            goto could_not_pin;

        nr_gets++;
        if ( op->flags & GNTMAP_host_map )
        {
            rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0);
            if ( rc != GNTST_okay )
                goto undo_out;

            /* A combined device mapping takes its own page references. */
            if ( op->flags & GNTMAP_device_map )
            {
                nr_gets++;
                (void)get_page(mfn_to_page(frame), rd);
                if ( !(op->flags & GNTMAP_readonly) )
                    get_page_type(mfn_to_page(frame), PGT_writable_page);
            }
        }
    }
    else
    {
    could_not_pin:
        if ( !rd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not pin grant frame %lx\n",
                     frame);
        if ( owner != NULL )
            put_page(mfn_to_page(frame));
        rc = GNTST_general_error;
        goto undo_out;
    }

    /* First writable pin of this frame: establish the IOMMU mapping. */
    if ( need_iommu(ld) &&
         !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
         (act_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
    {
        if ( iommu_map_page(ld, mfn_to_gmfn(ld, frame), frame) )
        {
            rc = GNTST_general_error;
            goto undo_out;
        }
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);

    /* Record the mapping in the maptrack slot reserved earlier. */
    mt = &maptrack_entry(ld->grant_table, handle);
    mt->domid = op->dom;
    mt->ref   = op->ref;
    mt->flags = op->flags;

    op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
    op->handle       = handle;
    op->status       = GNTST_okay;

    rcu_unlock_domain(rd);
    return;

 undo_out:
    /* Drop page references taken above, then unwind the pin counts. */
    if ( nr_gets > 1 )
    {
        if ( !(op->flags & GNTMAP_readonly) )
            put_page_type(mfn_to_page(frame));
        put_page(mfn_to_page(frame));
    }
    if ( nr_gets > 0 )
    {
        if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
            put_page_type(mfn_to_page(frame));
        put_page(mfn_to_page(frame));
    }

    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    if ( op->flags & GNTMAP_device_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    /* Clear the shared-entry access flags if no matching pins remain. */
    if ( !(op->flags & GNTMAP_readonly) &&
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    op->status = rc;
    put_maptrack_handle(ld->grant_table, handle);
    rcu_unlock_domain(rd);
}
459 static long
460 gnttab_map_grant_ref(
461 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
462 {
463 int i;
464 struct gnttab_map_grant_ref op;
466 for ( i = 0; i < count; i++ )
467 {
468 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
469 return -EFAULT;
470 __gnttab_map_grant_ref(&op);
471 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
472 return -EFAULT;
473 }
475 return 0;
476 }
/*
 * First phase of an unmap: validate the maptrack handle, drop the pin
 * counts, and remove the page-table / IOMMU mappings.  Page type and
 * reference counts are released later by __gnttab_unmap_common_complete(),
 * after the caller has flushed the TLB.  State shared between the two
 * phases is carried in *op.
 */
static void
__gnttab_unmap_common(
    struct gnttab_unmap_common *op)
{
    domid_t          dom;
    struct domain   *ld, *rd;
    struct active_grant_entry *act;
    grant_entry_t   *sha;
    s16              rc = 0;
    u32              old_pin;

    ld = current->domain;

    op->frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);

    if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
    {
        gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    op->map = &maptrack_entry(ld->grant_table, op->handle);

    /* flags == 0 means the maptrack entry is free, i.e. not a live mapping. */
    if ( unlikely(!op->map->flags) )
    {
        gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    dom = op->map->domid;
    op->flags = op->map->flags;

    if ( unlikely((op->rd = rd = rcu_lock_domain_by_id(dom)) == NULL) )
    {
        /* This can happen when a grant is implicitly unmapped. */
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
        domain_crash(ld); /* naughty... */
        return;
    }

    rc = xsm_grant_unmapref(ld, rd);
    if ( rc )
    {
        rcu_unlock_domain(rd);
        op->status = GNTST_permission_denied;
        return;
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);

    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->map->ref);
    sha = &shared_entry(rd->grant_table, op->map->ref);
    old_pin = act->pin;

    if ( op->frame == 0 )
    {
        /* No dev_bus_addr supplied: take the frame from the active entry. */
        op->frame = act->frame;
    }
    else
    {
        if ( unlikely(op->frame != act->frame) )
            PIN_FAIL(unmap_out, GNTST_general_error,
                     "Bad frame number doesn't match gntref. (%lx != %lx)\n",
                     op->frame, act->frame);
        if ( op->flags & GNTMAP_device_map )
        {
            ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
            op->map->flags &= ~GNTMAP_device_map;
            if ( op->flags & GNTMAP_readonly )
                act->pin -= GNTPIN_devr_inc;
            else
                act->pin -= GNTPIN_devw_inc;
        }
    }

    if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
    {
        if ( (rc = replace_grant_host_mapping(op->host_addr,
                                              op->frame, op->new_addr,
                                              op->flags)) < 0 )
            goto unmap_out;

        ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
        op->map->flags &= ~GNTMAP_host_map;
        if ( op->flags & GNTMAP_readonly )
            act->pin -= GNTPIN_hstr_inc;
        else
            act->pin -= GNTPIN_hstw_inc;
    }

    /* Last writable pin just went away: tear down the IOMMU mapping. */
    if ( need_iommu(ld) &&
         (old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
    {
        if ( iommu_unmap_page(ld, mfn_to_gmfn(ld, op->frame)) )
        {
            rc = GNTST_general_error;
            goto unmap_out;
        }
    }

    /* If just unmapped a writable mapping, mark as dirtied */
    if ( !(op->flags & GNTMAP_readonly) )
         gnttab_mark_dirty(rd, op->frame);

 unmap_out:
    op->status = rc;
    spin_unlock(&rd->grant_table->lock);
    rcu_unlock_domain(rd);
}
/*
 * Second phase of an unmap, run after the TLB flush: release the page
 * type / reference counts taken at map time, recycle the maptrack entry
 * once both mapping kinds are gone, and clear the shared-entry flags
 * when the last pin disappears.
 */
static void
__gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
{
    struct domain *ld, *rd;
    struct active_grant_entry *act;
    grant_entry_t *sha;

    rd = op->rd;

    if ( rd == NULL )
    {
        /*
         * Suggests that __gntab_unmap_common failed in
         * rcu_lock_domain_by_id() or earlier, and so we have nothing
         * to complete
         */
        return;
    }

    ld = current->domain;

    rcu_lock_domain(rd);
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->map->ref);
    sha = &shared_entry(rd->grant_table, op->map->ref);

    if ( unlikely(op->frame != act->frame) )
    {
        /*
         * Suggests that __gntab_unmap_common failed early and so
         * nothing further to do
         */
        goto unmap_out;
    }

    if ( op->flags & GNTMAP_device_map )
    {
        if ( !is_iomem_page(act->frame) )
        {
            if ( op->flags & GNTMAP_readonly )
                put_page(mfn_to_page(op->frame));
            else
                put_page_and_type(mfn_to_page(op->frame));
        }
    }

    if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
    {
        if ( op->status != 0 )
        {
            /*
             * Suggests that __gntab_unmap_common failed in
             * replace_grant_host_mapping() so nothing further to do
             */
            goto unmap_out;
        }

        if ( !is_iomem_page(op->frame) )
        {
            if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
                put_page_type(mfn_to_page(op->frame));
            put_page(mfn_to_page(op->frame));
        }
    }

    /* Both mapping kinds now gone: recycle the maptrack entry. */
    if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
    {
        op->map->flags = 0;
        put_maptrack_handle(ld->grant_table, op->handle);
    }

    /* Clear the shared-entry access flags if no matching pins remain. */
    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
         !(op->flags & GNTMAP_readonly) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( act->pin == 0 )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unmap_out:
    spin_unlock(&rd->grant_table->lock);
    rcu_unlock_domain(rd);
}
677 static void
678 __gnttab_unmap_grant_ref(
679 struct gnttab_unmap_grant_ref *op,
680 struct gnttab_unmap_common *common)
681 {
682 common->host_addr = op->host_addr;
683 common->dev_bus_addr = op->dev_bus_addr;
684 common->handle = op->handle;
686 /* Intialise these in case common contains old state */
687 common->new_addr = 0;
688 common->rd = NULL;
690 __gnttab_unmap_common(common);
691 op->status = common->status;
692 }
/*
 * Batch handler for GNTTABOP_unmap_grant_ref.  Requests are processed in
 * chunks of GNTTAB_UNMAP_BATCH_SIZE: phase one for each entry, one TLB
 * flush for the chunk, then phase two for each entry.  Even on a
 * guest-memory fault, entries that already went through phase one are
 * completed (after a flush) before returning -EFAULT.
 */
static long
gnttab_unmap_grant_ref(
    XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
{
    int i, c, partial_done, done = 0;
    struct gnttab_unmap_grant_ref op;
    struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];

    while ( count != 0 )
    {
        c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
        partial_done = 0;

        for ( i = 0; i < c; i++ )
        {
            if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
                goto fault;
            __gnttab_unmap_grant_ref(&op, &(common[i]));
            ++partial_done;
            if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
                goto fault;
        }

        flush_tlb_mask(&current->domain->domain_dirty_cpumask);

        for ( i = 0; i < partial_done; i++ )
            __gnttab_unmap_common_complete(&(common[i]));

        count -= c;
        done += c;
    }

    return 0;

fault:
    /* Flush and complete whatever made it through phase one. */
    flush_tlb_mask(&current->domain->domain_dirty_cpumask);

    for ( i = 0; i < partial_done; i++ )
        __gnttab_unmap_common_complete(&(common[i]));
    return -EFAULT;
}
737 static void
738 __gnttab_unmap_and_replace(
739 struct gnttab_unmap_and_replace *op,
740 struct gnttab_unmap_common *common)
741 {
742 common->host_addr = op->host_addr;
743 common->new_addr = op->new_addr;
744 common->handle = op->handle;
746 /* Intialise these in case common contains old state */
747 common->dev_bus_addr = 0;
748 common->rd = NULL;
750 __gnttab_unmap_common(common);
751 op->status = common->status;
752 }
/*
 * Batch handler for GNTTABOP_unmap_and_replace.  Batching and fault
 * behaviour are identical to gnttab_unmap_grant_ref() above; each
 * mapping is replaced with the PTE at op.new_addr instead of being
 * simply cleared.
 */
static long
gnttab_unmap_and_replace(
    XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) uop, unsigned int count)
{
    int i, c, partial_done, done = 0;
    struct gnttab_unmap_and_replace op;
    struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];

    while ( count != 0 )
    {
        c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
        partial_done = 0;

        for ( i = 0; i < c; i++ )
        {
            if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
                goto fault;
            __gnttab_unmap_and_replace(&op, &(common[i]));
            ++partial_done;
            if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
                goto fault;
        }

        flush_tlb_mask(&current->domain->domain_dirty_cpumask);

        for ( i = 0; i < partial_done; i++ )
            __gnttab_unmap_common_complete(&(common[i]));

        count -= c;
        done += c;
    }

    return 0;

fault:
    /* Flush and complete whatever made it through phase one. */
    flush_tlb_mask(&current->domain->domain_dirty_cpumask);

    for ( i = 0; i < partial_done; i++ )
        __gnttab_unmap_common_complete(&(common[i]));
    return -EFAULT;
}
/*
 * Grow domain d's grant table to req_nr_frames shared frames, expanding
 * the active-entry array to match.  Returns 1 on success, 0 on
 * allocation failure (partial allocations are rolled back).
 *
 * d's grant table lock must be held by the caller.
 */
int
gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
{
    struct grant_table *gt = d->grant_table;
    unsigned int i;

    ASSERT(req_nr_frames <= max_nr_grant_frames);

    gdprintk(XENLOG_INFO,
            "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
            d->domain_id, nr_grant_frames(gt), req_nr_frames);

    /* Active */
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
            goto active_alloc_failed;
        clear_page(gt->active[i]);
    }

    /* Shared */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        if ( (gt->shared[i] = alloc_xenheap_page()) == NULL )
            goto shared_alloc_failed;
        clear_page(gt->shared[i]);
    }

    /* Share the new shared frames with the recipient domain */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
        gnttab_create_shared_page(d, gt, i);

    gt->nr_grant_frames = req_nr_frames;

    return 1;

    /*
     * Roll back this call's allocations.  NOTE(review): these loops also
     * visit slots that were never allocated this call — presumably those
     * are NULL and free_xenheap_page() tolerates NULL; confirm.
     */
 shared_alloc_failed:
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        free_xenheap_page(gt->shared[i]);
        gt->shared[i] = NULL;
    }
 active_alloc_failed:
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        free_xenheap_page(gt->active[i]);
        gt->active[i] = NULL;
    }
    gdprintk(XENLOG_INFO, "Allocation failure when expanding grant table.\n");
    return 0;
}
/*
 * GNTTABOP_setup_table: grow the target domain's grant table to
 * op.nr_frames frames (if needed) and return the list of GMFNs at which
 * the shared frames are mapped.  Only count == 1 is supported.
 */
static long
gnttab_setup_table(
    XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
{
    struct gnttab_setup_table op;
    struct domain *d;
    int            i;
    unsigned long  gmfn;
    domid_t        dom;

    if ( count != 1 )
        return -EINVAL;

    if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
    {
        gdprintk(XENLOG_INFO, "Fault while reading gnttab_setup_table_t.\n");
        return -EFAULT;
    }

    if ( unlikely(op.nr_frames > max_nr_grant_frames) )
    {
        gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
                " per domain.\n",
                max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto out1;
    }

    dom = op.dom;
    if ( dom == DOMID_SELF )
    {
        d = rcu_lock_current_domain();
    }
    else
    {
        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
        {
            gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
            op.status = GNTST_bad_domain;
            goto out1;
        }

        /* Acting on another domain requires control privilege over it. */
        if ( unlikely(!IS_PRIV_FOR(current->domain, d)) )
        {
            op.status = GNTST_permission_denied;
            goto out2;
        }
    }

    if ( xsm_grant_setup(current->domain, d) )
    {
        op.status = GNTST_permission_denied;
        goto out2;
    }

    spin_lock(&d->grant_table->lock);

    if ( (op.nr_frames > nr_grant_frames(d->grant_table)) &&
         !gnttab_grow_table(d, op.nr_frames) )
    {
        gdprintk(XENLOG_INFO,
                 "Expand grant table to %d failed. Current: %d Max: %d.\n",
                 op.nr_frames,
                 nr_grant_frames(d->grant_table),
                 max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto out3;
    }

    /* Report the GMFN of each shared frame back to the caller. */
    op.status = GNTST_okay;
    for ( i = 0; i < op.nr_frames; i++ )
    {
        gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
        /* NOTE(review): a copy failure here is ignored — the guest still
         * sees GNTST_okay in op.status; confirm this is intended. */
        (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
    }

 out3:
    spin_unlock(&d->grant_table->lock);
 out2:
    rcu_unlock_domain(d);
 out1:
    if ( unlikely(copy_to_guest(uop, &op, 1)) )
        return -EFAULT;

    return 0;
}
/*
 * GNTTABOP_query_size: report a domain's current and maximum number of
 * grant-table frames.  Only count == 1 is supported.
 * (Closing brace restored; it was elided by the source extraction.)
 */
static long
gnttab_query_size(
    XEN_GUEST_HANDLE(gnttab_query_size_t) uop, unsigned int count)
{
    struct gnttab_query_size op;
    struct domain *d;
    domid_t dom;
    int rc;

    if ( count != 1 )
        return -EINVAL;

    if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
    {
        gdprintk(XENLOG_INFO, "Fault while reading gnttab_query_size_t.\n");
        return -EFAULT;
    }

    dom = op.dom;
    if ( dom == DOMID_SELF )
    {
        d = rcu_lock_current_domain();
    }
    else
    {
        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
        {
            gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
            op.status = GNTST_bad_domain;
            goto query_out;
        }

        /* Acting on another domain requires control privilege over it. */
        if ( unlikely(!IS_PRIV_FOR(current->domain, d)) )
        {
            op.status = GNTST_permission_denied;
            goto query_out_unlock;
        }
    }

    rc = xsm_grant_query_size(current->domain, d);
    if ( rc )
    {
        op.status = GNTST_permission_denied;
        goto query_out_unlock;
    }

    spin_lock(&d->grant_table->lock);

    op.nr_frames     = nr_grant_frames(d->grant_table);
    op.max_nr_frames = max_nr_grant_frames;
    op.status        = GNTST_okay;

    spin_unlock(&d->grant_table->lock);

 query_out_unlock:
    rcu_unlock_domain(d);

 query_out:
    if ( unlikely(copy_to_guest(uop, &op, 1)) )
        return -EFAULT;

    return 0;
}
/*
 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
 * ownership of a page frame. If so, lock down the grant entry by
 * cmpxchg-ing GTF_transfer_committed into its flags (bounded retries, as
 * the guest shares this memory and could otherwise livelock us).
 * Returns 1 on success, 0 on failure.
 * (Brace-only lines restored; they were elided by the source extraction.)
 */
static int
gnttab_prepare_for_transfer(
    struct domain *rd, struct domain *ld, grant_ref_t ref)
{
    struct grant_table *rgt;
    struct grant_entry *sha;
    union grant_combo   scombo, prev_scombo, new_scombo;
    int                 retries = 0;

    if ( unlikely((rgt = rd->grant_table) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Dom %d has no grant table.\n", rd->domain_id);
        return 0;
    }

    spin_lock(&rgt->lock);

    if ( unlikely(ref >= nr_grant_entries(rd->grant_table)) )
    {
        gdprintk(XENLOG_INFO,
                "Bad grant reference (%d) for transfer to domain(%d).\n",
                ref, rd->domain_id);
        goto fail;
    }

    sha = &shared_entry(rgt, ref);

    scombo.word = *(u32 *)&sha->flags;

    for ( ; ; )
    {
        /* Entry must be GTF_accept_transfer and name 'ld' as the peer. */
        if ( unlikely(scombo.shorts.flags != GTF_accept_transfer) ||
             unlikely(scombo.shorts.domid != ld->domain_id) )
        {
            gdprintk(XENLOG_INFO, "Bad flags (%x) or dom (%d). "
                    "(NB. expected dom %d)\n",
                    scombo.shorts.flags, scombo.shorts.domid,
                    ld->domain_id);
            goto fail;
        }

        new_scombo = scombo;
        new_scombo.shorts.flags |= GTF_transfer_committed;

        prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                   scombo.word, new_scombo.word);
        if ( likely(prev_scombo.word == scombo.word) )
            break;

        if ( retries++ == 4 )
        {
            gdprintk(XENLOG_WARNING, "Shared grant entry is unstable.\n");
            goto fail;
        }

        scombo = prev_scombo;
    }

    spin_unlock(&rgt->lock);
    return 1;

 fail:
    spin_unlock(&rgt->lock);
    return 0;
}
/*
 * GNTTABOP_transfer: for each request, steal the nominated page from the
 * caller, and give it to the target domain under the grant reference it
 * advertised with GTF_accept_transfer.  The page may first be copied to
 * a lower machine address if the target cannot address the original one.
 * (Brace-only lines restored; they were elided by the source extraction.)
 */
static long
gnttab_transfer(
    XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
{
    struct domain *d = current->domain;
    struct domain *e;
    struct page_info *page;
    int i;
    grant_entry_t *sha;
    struct gnttab_transfer gop;
    unsigned long mfn;
    unsigned int max_bitsize;

    for ( i = 0; i < count; i++ )
    {
        /* Read from caller address space. */
        if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
        {
            gdprintk(XENLOG_INFO, "gnttab_transfer: error reading req %d/%d\n",
                    i, count);
            return -EFAULT;
        }

        mfn = gmfn_to_mfn(d, gop.mfn);

        /* Check the passed page frame for basic validity. */
        if ( unlikely(!mfn_valid(mfn)) )
        {
            gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
                    (unsigned long)gop.mfn);
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        page = mfn_to_page(mfn);
        if ( unlikely(is_xen_heap_page(page)) )
        {
            gdprintk(XENLOG_INFO, "gnttab_transfer: xen frame %lx\n",
                    (unsigned long)gop.mfn);
            gop.status = GNTST_bad_page;
            goto copyback;
        }

        /* Take the page away from the caller. */
        if ( steal_page(d, page, 0) < 0 )
        {
            gop.status = GNTST_bad_page;
            goto copyback;
        }

#ifndef __ia64__ /* IA64 implicitly replaces the old page in steal_page(). */
        guest_physmap_remove_page(d, gop.mfn, mfn, 0);
#endif
        flush_tlb_mask(&d->domain_dirty_cpumask);

        /* Find the target domain. */
        if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
        {
            gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
                    gop.domid);
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            gop.status = GNTST_bad_domain;
            goto copyback;
        }

        if ( xsm_grant_transfer(d, e) )
        {
            gop.status = GNTST_permission_denied;
        unlock_and_copyback:
            rcu_unlock_domain(e);
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            goto copyback;
        }

        /*
         * If the target cannot address the page's machine frame, copy the
         * contents into a freshly allocated, addressable page instead.
         */
        max_bitsize = domain_clamp_alloc_bitsize(
            e, BITS_PER_LONG+PAGE_SHIFT-1);
        if ( (1UL << (max_bitsize - PAGE_SHIFT)) <= mfn )
        {
            struct page_info *new_page;
            void *sp, *dp;

            new_page = alloc_domheap_page(NULL, MEMF_bits(max_bitsize));
            if ( new_page == NULL )
            {
                gop.status = GNTST_address_too_big;
                goto unlock_and_copyback;
            }

            sp = map_domain_page(mfn);
            dp = map_domain_page(page_to_mfn(new_page));
            memcpy(dp, sp, PAGE_SIZE);
            unmap_domain_page(dp);
            unmap_domain_page(sp);

            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            page = new_page;
        }

        spin_lock(&e->page_alloc_lock);

        /*
         * Check that 'e' will accept the page and has reservation
         * headroom. Also, a domain mustn't have PGC_allocated
         * pages when it is dying.
         */
        if ( unlikely(e->is_dying) ||
             unlikely(e->tot_pages >= e->max_pages) ||
             unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
        {
            if ( !e->is_dying )
                gdprintk(XENLOG_INFO, "gnttab_transfer: "
                        "Transferee has no reservation "
                        "headroom (%d,%d) or provided a bad grant ref (%08x) "
                        "or is dying (%d)\n",
                        e->tot_pages, e->max_pages, gop.ref, e->is_dying);
            spin_unlock(&e->page_alloc_lock);
            rcu_unlock_domain(e);
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
            gop.status = GNTST_general_error;
            goto copyback;
        }

        /* Okay, add the page to 'e'. */
        if ( unlikely(e->tot_pages++ == 0) )
            get_knownalive_domain(e);
        page_list_add_tail(page, &e->page_list);
        page_set_owner(page, e);

        spin_unlock(&e->page_alloc_lock);

        TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);

        /* Tell the guest about its new page frame. */
        spin_lock(&e->grant_table->lock);

        sha = &shared_entry(e->grant_table, gop.ref);
        guest_physmap_add_page(e, sha->frame, mfn, 0);
        sha->frame = mfn;
        wmb();  /* frame visible before completion flag */
        sha->flags |= GTF_transfer_completed;

        spin_unlock(&e->grant_table->lock);

        rcu_unlock_domain(e);

        gop.status = GNTST_okay;

    copyback:
        if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
        {
            gdprintk(XENLOG_INFO, "gnttab_transfer: error writing resp "
                     "%d/%d\n", i, count);
            return -EFAULT;
        }
    }

    return 0;
}
/* Undo __acquire_grant_for_copy.  Again, this has no effect on page
   type and reference counts. */
static void
__release_grant_for_copy(
    struct domain *rd, unsigned long gref, int readonly)
{
    grant_entry_t *sha;
    struct active_grant_entry *act;
    unsigned long r_frame;

    /* All pin/flag updates happen under the remote table's lock. */
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, gref);
    sha = &shared_entry(rd->grant_table, gref);
    r_frame = act->frame;

    if ( readonly )
    {
        /* Read-only pin: just drop the host-read pin count. */
        act->pin -= GNTPIN_hstr_inc;
    }
    else
    {
        /* The frame may have been written to; mark it dirty for log-dirty
         * tracking before dropping the write pin. */
        gnttab_mark_dirty(rd, r_frame);

        act->pin -= GNTPIN_hstw_inc;
        /* Last writer gone: clear GTF_writing in the shared entry so the
         * granter can see the entry is no longer write-pinned. */
        if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) )
            gnttab_clear_flag(_GTF_writing, &sha->flags);
    }

    /* Last pin of any kind gone: clear GTF_reading as well. */
    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

    spin_unlock(&rd->grant_table->lock);
}
/* Grab a frame number from a grant entry and update the flags and pin
   count as appropriate. Note that this does *not* update the page
   type or reference counts, and does not check that the mfn is
   actually valid. */
static int
__acquire_grant_for_copy(
    struct domain *rd, unsigned long gref, int readonly,
    unsigned long *frame)
{
    grant_entry_t *sha;
    struct active_grant_entry *act;
    s16 rc = GNTST_okay;
    int retries = 0;
    union grant_combo scombo, prev_scombo, new_scombo;

    spin_lock(&rd->grant_table->lock);

    if ( unlikely(gref >= nr_grant_entries(rd->grant_table)) )
        PIN_FAIL(unlock_out, GNTST_bad_gntref,
                 "Bad grant reference %ld\n", gref);

    act = &active_entry(rd->grant_table, gref);
    sha = &shared_entry(rd->grant_table, gref);

    /* If already pinned, check the active domid and avoid refcnt overflow. */
    /* 0x80808080: top bit of each of the four 8-bit pin subcounts — reject
     * before any of them can wrap. */
    if ( act->pin &&
         ((act->domid != current->domain->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, current->domain->domain_id, act->pin);

    /* Only touch the shared entry's flags when this is the first pin, or
     * when upgrading a read-only pin to include write access. */
    if ( !act->pin ||
         (!readonly && !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /* cmpxchg retry loop: the granter may concurrently modify the
         * flags/domid pair, so update it atomically as one 32-bit word. */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != current->domain->domain_id)) )
                 PIN_FAIL(unlock_out, GNTST_general_error,
                          "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                          scombo.shorts.flags, scombo.shorts.domid,
                          current->domain->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !readonly )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            /* Bounded retries: a constantly-changing entry is treated as
             * hostile/unstable rather than spinning forever. */
            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        if ( !act->pin )
        {
            /* First pin: latch the granting domid and translate the guest
             * frame (sha->frame) to a machine frame once. */
            act->domid = scombo.shorts.domid;
            act->gfn = sha->frame;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    *frame = act->frame;

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    return rc;
}
/*
 * Perform a single GNTTABOP_copy: copy op->len bytes between a source and
 * destination page, each identified either by grant reference or (for
 * DOMID_SELF only) by guest frame number.  The result is reported via
 * op->status (a GNTST_* code); the function itself returns nothing.
 * All acquired grants, page references and domain locks are released on
 * every exit path via the have_* flags and the error_out label.
 */
static void
__gnttab_copy(
    struct gnttab_copy *op)
{
    struct domain *sd = NULL, *dd = NULL;
    unsigned long s_frame, d_frame;
    char *sp, *dp;
    s16 rc = GNTST_okay;
    int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
    int src_is_gref, dest_is_gref;

    /* The copy must lie entirely within one page on each side. */
    if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
         ((op->dest.offset + op->len) > PAGE_SIZE) )
        PIN_FAIL(error_out, GNTST_bad_copy_arg, "copy beyond page area.\n");

    src_is_gref = op->flags & GNTCOPY_source_gref;
    dest_is_gref = op->flags & GNTCOPY_dest_gref;

    /* Raw-frame addressing is only permitted for the caller's own pages;
     * foreign pages must be named by grant reference. */
    if ( (op->source.domid != DOMID_SELF && !src_is_gref ) ||
         (op->dest.domid != DOMID_SELF && !dest_is_gref) )
        PIN_FAIL(error_out, GNTST_permission_denied,
                 "only allow copy-by-mfn for DOMID_SELF.\n");

    if ( op->source.domid == DOMID_SELF )
        sd = rcu_lock_current_domain();
    else if ( (sd = rcu_lock_domain_by_id(op->source.domid)) == NULL )
        PIN_FAIL(error_out, GNTST_bad_domain,
                 "couldn't find %d\n", op->source.domid);

    if ( op->dest.domid == DOMID_SELF )
        dd = rcu_lock_current_domain();
    else if ( (dd = rcu_lock_domain_by_id(op->dest.domid)) == NULL )
        PIN_FAIL(error_out, GNTST_bad_domain,
                 "couldn't find %d\n", op->dest.domid);

    /* Security-module hook; any denial is reported as permission_denied. */
    rc = xsm_grant_copy(sd, dd);
    if ( rc )
    {
        rc = GNTST_permission_denied;
        goto error_out;
    }

    /* Resolve the source frame (read-only pin if by grant reference). */
    if ( src_is_gref )
    {
        rc = __acquire_grant_for_copy(sd, op->source.u.ref, 1, &s_frame);
        if ( rc != GNTST_okay )
            goto error_out;
        have_s_grant = 1;
    }
    else
    {
        s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
    }
    if ( unlikely(!mfn_valid(s_frame)) )
        PIN_FAIL(error_out, GNTST_general_error,
                 "source frame %lx invalid.\n", s_frame);
    /* Take a general reference to keep the page alive during the copy. */
    if ( !get_page(mfn_to_page(s_frame), sd) )
    {
        /* Failure is expected while sd is being torn down; stay quiet then. */
        if ( !sd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
        rc = GNTST_general_error;
        goto error_out;
    }
    have_s_ref = 1;

    /* Resolve the destination frame (writable pin if by grant reference). */
    if ( dest_is_gref )
    {
        rc = __acquire_grant_for_copy(dd, op->dest.u.ref, 0, &d_frame);
        if ( rc != GNTST_okay )
            goto error_out;
        have_d_grant = 1;
    }
    else
    {
        d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
    }
    if ( unlikely(!mfn_valid(d_frame)) )
        PIN_FAIL(error_out, GNTST_general_error,
                 "destination frame %lx invalid.\n", d_frame);
    /* Destination needs a writable type reference, not just a general one. */
    if ( !get_page_and_type(mfn_to_page(d_frame), dd, PGT_writable_page) )
    {
        if ( !dd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
        rc = GNTST_general_error;
        goto error_out;
    }

    sp = map_domain_page(s_frame);
    dp = map_domain_page(d_frame);

    memcpy(dp + op->dest.offset, sp + op->source.offset, op->len);

    unmap_domain_page(dp);
    unmap_domain_page(sp);

    /* Destination was written: record it for log-dirty tracking. */
    gnttab_mark_dirty(dd, d_frame);

    put_page_and_type(mfn_to_page(d_frame));
 error_out:
    /* Unwind whatever was acquired, in reverse order of acquisition. */
    if ( have_s_ref )
        put_page(mfn_to_page(s_frame));
    if ( have_s_grant )
        __release_grant_for_copy(sd, op->source.u.ref, 1);
    if ( have_d_grant )
        __release_grant_for_copy(dd, op->dest.u.ref, 0);
    if ( sd )
        rcu_unlock_domain(sd);
    if ( dd )
        rcu_unlock_domain(dd);
    op->status = rc;
}
1472 static long
1473 gnttab_copy(
1474 XEN_GUEST_HANDLE(gnttab_copy_t) uop, unsigned int count)
1476 int i;
1477 struct gnttab_copy op;
1479 for ( i = 0; i < count; i++ )
1481 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
1482 return -EFAULT;
1483 __gnttab_copy(&op);
1484 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
1485 return -EFAULT;
1487 return 0;
1490 long
1491 do_grant_table_op(
1492 unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
1494 long rc;
1495 struct domain *d = current->domain;
1497 if ( count > 512 )
1498 return -EINVAL;
1500 domain_lock(d);
1502 rc = -EFAULT;
1503 switch ( cmd )
1505 case GNTTABOP_map_grant_ref:
1507 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
1508 guest_handle_cast(uop, gnttab_map_grant_ref_t);
1509 if ( unlikely(!guest_handle_okay(map, count)) )
1510 goto out;
1511 rc = gnttab_map_grant_ref(map, count);
1512 break;
1514 case GNTTABOP_unmap_grant_ref:
1516 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
1517 guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
1518 if ( unlikely(!guest_handle_okay(unmap, count)) )
1519 goto out;
1520 rc = gnttab_unmap_grant_ref(unmap, count);
1521 break;
1523 case GNTTABOP_unmap_and_replace:
1525 XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) unmap =
1526 guest_handle_cast(uop, gnttab_unmap_and_replace_t);
1527 if ( unlikely(!guest_handle_okay(unmap, count)) )
1528 goto out;
1529 rc = -ENOSYS;
1530 if ( unlikely(!replace_grant_supported()) )
1531 goto out;
1532 rc = gnttab_unmap_and_replace(unmap, count);
1533 break;
1535 case GNTTABOP_setup_table:
1537 rc = gnttab_setup_table(
1538 guest_handle_cast(uop, gnttab_setup_table_t), count);
1539 break;
1541 case GNTTABOP_transfer:
1543 XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
1544 guest_handle_cast(uop, gnttab_transfer_t);
1545 if ( unlikely(!guest_handle_okay(transfer, count)) )
1546 goto out;
1547 rc = gnttab_transfer(transfer, count);
1548 break;
1550 case GNTTABOP_copy:
1552 XEN_GUEST_HANDLE(gnttab_copy_t) copy =
1553 guest_handle_cast(uop, gnttab_copy_t);
1554 if ( unlikely(!guest_handle_okay(copy, count)) )
1555 goto out;
1556 rc = gnttab_copy(copy, count);
1557 break;
1559 case GNTTABOP_query_size:
1561 rc = gnttab_query_size(
1562 guest_handle_cast(uop, gnttab_query_size_t), count);
1563 break;
1565 default:
1566 rc = -ENOSYS;
1567 break;
1570 out:
1571 domain_unlock(d);
1573 return rc;
1576 #ifdef CONFIG_COMPAT
1577 #include "compat/grant_table.c"
1578 #endif
1580 static unsigned int max_nr_active_grant_frames(void)
1582 return (((max_nr_grant_frames * (PAGE_SIZE / sizeof(grant_entry_t))) +
1583 ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
1584 / (PAGE_SIZE / sizeof(struct active_grant_entry)));
/*
 * Allocate and initialise a new grant table for domain @d and install it
 * as d->grant_table.  Three parallel structures are set up: the active
 * entry frames, the maptrack (foreign-mapping tracking) frames, and the
 * shared (guest-visible) grant entry frames.  Returns 0 on success or
 * -ENOMEM, unwinding all partial allocations via the no_mem_* chain.
 */
int
grant_table_create(
    struct domain *d)
{
    struct grant_table *t;
    int i;

    /* If this sizeof assertion fails, fix the function: shared_index */
    ASSERT(sizeof(grant_entry_t) == 8);

    if ( (t = xmalloc(struct grant_table)) == NULL )
        goto no_mem_0;

    /* Simple stuff. */
    memset(t, 0, sizeof(*t));
    spin_lock_init(&t->lock);
    t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;

    /* Active grant table. */
    /* The pointer array is sized for the maximum; only the initial frames
     * are populated now, the rest are grown on demand. */
    if ( (t->active = xmalloc_array(struct active_grant_entry *,
                                    max_nr_active_grant_frames())) == NULL )
        goto no_mem_1;
    memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
    for ( i = 0;
          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
    {
        if ( (t->active[i] = alloc_xenheap_page()) == NULL )
            goto no_mem_2;
        clear_page(t->active[i]);
    }

    /* Tracking of mapped foreign frames table */
    if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
                                      max_nr_maptrack_frames())) == NULL )
        goto no_mem_2;
    memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
    if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
        goto no_mem_3;
    clear_page(t->maptrack[0]);
    t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
    /* Thread the unused maptrack entries into a free list via .ref. */
    for ( i = 0; i < t->maptrack_limit; i++ )
        t->maptrack[0][i].ref = i+1;

    /* Shared grant table. */
    if ( (t->shared = xmalloc_array(struct grant_entry *,
                                    max_nr_grant_frames)) == NULL )
        goto no_mem_3;
    memset(t->shared, 0, max_nr_grant_frames * sizeof(t->shared[0]));
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
    {
        if ( (t->shared[i] = alloc_xenheap_page()) == NULL )
            goto no_mem_4;
        clear_page(t->shared[i]);
    }

    /* Make the shared frames visible to the guest (arch-specific hook). */
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
        gnttab_create_shared_page(d, t, i);

    /* Okay, install the structure. */
    d->grant_table = t;
    return 0;

    /* Error unwinding: each label frees everything allocated before the
     * failing step.  NOTE(review): the loops below pass NULL pointers for
     * frames never allocated — assumes free_xenheap_page(NULL) is a no-op. */
 no_mem_4:
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
        free_xenheap_page(t->shared[i]);
    xfree(t->shared);
 no_mem_3:
    free_xenheap_page(t->maptrack[0]);
    xfree(t->maptrack);
 no_mem_2:
    for ( i = 0;
          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
        free_xenheap_page(t->active[i]);
    xfree(t->active);
 no_mem_1:
    xfree(t);
 no_mem_0:
    return -ENOMEM;
}
/*
 * Called while domain @d is dying: walk d's maptrack table and tear down
 * every grant mapping it still holds of other domains' pages, dropping the
 * corresponding pin counts and page references in the granting domain and
 * clearing the shared-entry flags when the last pin goes away.
 */
void
gnttab_release_mappings(
    struct domain *d)
{
    struct grant_table   *gt = d->grant_table;
    struct grant_mapping *map;
    grant_ref_t           ref;
    grant_handle_t        handle;
    struct domain        *rd;
    struct active_grant_entry *act;
    struct grant_entry   *sha;

    BUG_ON(!d->is_dying);

    for ( handle = 0; handle < gt->maptrack_limit; handle++ )
    {
        map = &maptrack_entry(gt, handle);
        /* Skip entries that carry no live device or host mapping. */
        if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
            continue;

        ref = map->ref;

        gdprintk(XENLOG_INFO, "Grant release (%hu) ref:(%hu) "
                 "flags:(%x) dom:(%hu)\n",
                 handle, ref, map->flags, map->domid);

        rd = rcu_lock_domain_by_id(map->domid);
        if ( rd == NULL )
        {
            /* Nothing to clear up... */
            /* (Granting domain already gone; just retire the entry.) */
            map->flags = 0;
            continue;
        }

        spin_lock(&rd->grant_table->lock);

        act = &active_entry(rd->grant_table, ref);
        sha = &shared_entry(rd->grant_table, ref);

        if ( map->flags & GNTMAP_readonly )
        {
            /* Read-only pins took plain get_page() references. */
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devr_mask));
                act->pin -= GNTPIN_devr_inc;
                /* I/O-memory frames carry no refcount to drop. */
                if ( !is_iomem_page(act->frame) )
                    put_page(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstr_mask));
                act->pin -= GNTPIN_hstr_inc;
                if ( gnttab_release_host_mappings &&
                     !is_iomem_page(act->frame) )
                    put_page(mfn_to_page(act->frame));
            }
        }
        else
        {
            /* Writable pins additionally hold a PGT_writable type ref. */
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devw_mask));
                act->pin -= GNTPIN_devw_inc;
                if ( !is_iomem_page(act->frame) )
                    put_page_and_type(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstw_mask));
                act->pin -= GNTPIN_hstw_inc;
                if ( gnttab_release_host_mappings &&
                     !is_iomem_page(act->frame) )
                {
                    /* Only drop the type ref if the mapping actually took
                     * one (arch/mapping-dependent). */
                    if ( gnttab_host_mapping_get_page_type(map, d, rd) )
                        put_page_type(mfn_to_page(act->frame));
                    put_page(mfn_to_page(act->frame));
                }
            }
        }

        /* Mirror __release_grant_for_copy: clear shared-entry flags once
         * the last write pin / last pin of any kind is gone. */
        if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
            gnttab_clear_flag(_GTF_writing, &sha->flags);

        if ( act->pin == 0 )
            gnttab_clear_flag(_GTF_reading, &sha->flags);

        spin_unlock(&rd->grant_table->lock);

        rcu_unlock_domain(rd);

        map->flags = 0;
    }
}
1764 void
1765 grant_table_destroy(
1766 struct domain *d)
1768 struct grant_table *t = d->grant_table;
1769 int i;
1771 if ( t == NULL )
1772 return;
1774 for ( i = 0; i < nr_grant_frames(t); i++ )
1775 free_xenheap_page(t->shared[i]);
1776 xfree(t->shared);
1778 for ( i = 0; i < nr_maptrack_frames(t); i++ )
1779 free_xenheap_page(t->maptrack[i]);
1780 xfree(t->maptrack);
1782 for ( i = 0; i < nr_active_grant_frames(t); i++ )
1783 free_xenheap_page(t->active[i]);
1784 xfree(t->active);
1786 xfree(t);
1787 d->grant_table = NULL;
1790 /*
1791 * Local variables:
1792 * mode: C
1793 * c-set-style: "BSD"
1794 * c-basic-offset: 4
1795 * tab-width: 4
1796 * indent-tabs-mode: nil
1797 * End:
1798 */