ia64/xen-unstable

view xen/common/grant_table.c @ 18594:5e4e234d58be

x86: Define __per_cpu_shift label to help kdump/crashdump.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Oct 08 13:11:06 2008 +0100 (2008-10-08)
parents dbd5d4eeb46a
children 489f35400ef2
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005-2006 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/config.h>
28 #include <xen/iocap.h>
29 #include <xen/lib.h>
30 #include <xen/sched.h>
31 #include <xen/mm.h>
32 #include <xen/trace.h>
33 #include <xen/guest_access.h>
34 #include <xen/domain_page.h>
35 #include <xen/iommu.h>
36 #include <xen/paging.h>
37 #include <xsm/xsm.h>
/*
 * Boot-time configurable ceiling on the number of shared grant-table
 * frames per domain.  An architecture may instead supply a fixed
 * max_nr_grant_frames definition, in which case the command-line
 * parameter below is unavailable.
 */
#ifndef max_nr_grant_frames
unsigned int max_nr_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
#endif

/* The maximum number of grant mappings is defined as a multiplier of the
 * maximum number of grant table entries. This defines the multiplier used.
 * Pretty arbitrary. [POLICY]
 */
#define MAX_MAPTRACK_TO_GRANTS_RATIO 8
/*
 * The first two members of a grant entry are updated as a combined pair.
 * The following union allows that to happen in an endian-neutral fashion.
 * The 32-bit 'word' overlays the 16-bit flags/domid pair so both can be
 * read or cmpxchg'd together as a single u32.
 */
union grant_combo {
    uint32_t word;
    struct {
        uint16_t flags;
        domid_t  domid;
    } shorts;
};
/* Used to share code between unmap_grant_ref and unmap_and_replace. */
struct gnttab_unmap_common {
    /* Input */
    uint64_t host_addr;
    uint64_t dev_bus_addr;
    uint64_t new_addr;
    grant_handle_t handle;

    /* Return */
    int16_t status;

    /* Shared state between *_unmap and *_unmap_complete */
    u16 flags;
    unsigned long frame;
    struct grant_mapping *map;
    struct domain *rd;
};

/* Number of unmap operations that are done between each tlb flush */
#define GNTTAB_UNMAP_BATCH_SIZE 32

/*
 * Log a warning, assign the given status to 'rc' (which must be in
 * scope at the call site), and jump to the given error label.
 */
#define PIN_FAIL(_lbl, _rc, _f, _a...)          \
    do {                                        \
        gdprintk(XENLOG_WARNING, _f, ## _a );   \
        rc = (_rc);                             \
        goto _lbl;                              \
    } while ( 0 )

/* Maptrack entries per page, and 2-D indexing into the maptrack table. */
#define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
#define maptrack_entry(t, e) \
    ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
/*
 * Number of maptrack frames currently backing 't'.  maptrack_limit only
 * ever grows by whole MAPTRACK_PER_PAGE increments (see
 * get_maptrack_handle()), so this division is exact.
 */
static inline unsigned int
nr_maptrack_frames(struct grant_table *t)
{
    return t->maptrack_limit / MAPTRACK_PER_PAGE;
}
101 static unsigned inline int max_nr_maptrack_frames(void)
102 {
103 return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
104 }
/* Shared grant entries per page, and 2-D indexing into the shared table. */
#define SHGNT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_t))
#define shared_entry(t, e) \
    ((t)->shared[(e)/SHGNT_PER_PAGE][(e)%SHGNT_PER_PAGE])
/* Active grant entries per page, and 2-D indexing into the active table. */
#define ACGNT_PER_PAGE (PAGE_SIZE / sizeof(struct active_grant_entry))
#define active_entry(t, e) \
    ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
/*
 * Pop a free maptrack handle from t's free list, or return -1 if the
 * list is exhausted.  Free entries are chained through their 'ref'
 * fields; the list is considered empty once the head reaches the last
 * entry of the current limit.
 */
static inline int
__get_maptrack_handle(
    struct grant_table *t)
{
    unsigned int h;
    if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
        return -1;
    t->maptrack_head = maptrack_entry(t, h).ref;
    t->map_count++;
    return h;
}
/* Push a maptrack handle back onto the head of t's free list. */
static inline void
put_maptrack_handle(
    struct grant_table *t, int handle)
{
    maptrack_entry(t, handle).ref = t->maptrack_head;
    t->maptrack_head = handle;
    t->map_count--;
}
/*
 * Allocate a maptrack handle for local grant table 'lgt', growing the
 * maptrack table by one xenheap page (up to max_nr_maptrack_frames())
 * when the free list is empty.  Returns the handle, or -1 on exhaustion
 * or allocation failure.
 */
static inline int
get_maptrack_handle(
    struct grant_table *lgt)
{
    int i;
    grant_handle_t handle;
    struct grant_mapping *new_mt;
    unsigned int new_mt_limit, nr_frames;

    /* Fast path without the lock; re-check under the lock on failure. */
    if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
    {
        spin_lock(&lgt->lock);

        if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
        {
            nr_frames = nr_maptrack_frames(lgt);
            if ( nr_frames >= max_nr_maptrack_frames() )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            new_mt = alloc_xenheap_page();
            if ( new_mt == NULL )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            clear_page(new_mt);

            new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;

            /* Chain the new entries into the free list via their 'ref'
             * fields. */
            for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
            {
                new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
                new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
            }

            lgt->maptrack[nr_frames] = new_mt;
            lgt->maptrack_limit = new_mt_limit;

            gdprintk(XENLOG_INFO,
                     "Increased maptrack size to %u frames.\n", nr_frames + 1);
            handle = __get_maptrack_handle(lgt);
        }

        spin_unlock(&lgt->lock);
    }
    return handle;
}
/*
 * Returns 0 if TLB flush / invalidate required by caller.
 * va will indicate the address to be invalidated.
 *
 * addr is _either_ a host virtual address, or the address of the pte to
 * update, as indicated by the GNTMAP_contains_pte flag.
 *
 * NOTE(review): the "Returns 0 ..." text above looks stale -- the
 * function below returns void and reports its result via op->status;
 * confirm against revision history before relying on it.
 */
static void
__gnttab_map_grant_ref(
    struct gnttab_map_grant_ref *op)
{
    struct domain *ld, *rd;
    struct vcpu *led;
    int handle;
    unsigned long frame = 0, nr_gets = 0;
    int rc = GNTST_okay;
    u32 old_pin;
    unsigned int cache_flags;
    struct active_grant_entry *act;
    struct grant_mapping *mt;
    grant_entry_t *sha;
    union grant_combo scombo, prev_scombo, new_scombo;

    /*
     * We bound the number of times we retry CMPXCHG on memory locations that
     * we share with a guest OS. The reason is that the guest can modify that
     * location at a higher rate than we can read-modify-CMPXCHG, so the guest
     * could cause us to livelock. There are a few cases where it is valid for
     * the guest to race our updates (e.g., to change the GTF_readonly flag),
     * so we allow a few retries before failing.
     */
    int retries = 0;

    led = current;
    ld = led->domain;

    /* At least one of device/host mapping must be requested. */
    if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
    {
        gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
        op->status = GNTST_bad_gntref;
        return;
    }

    if ( unlikely((rd = rcu_lock_domain_by_id(op->dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", op->dom);
        op->status = GNTST_bad_domain;
        return;
    }

    rc = xsm_grant_mapref(ld, rd, op->flags);
    if ( rc )
    {
        rcu_unlock_domain(rd);
        op->status = GNTST_permission_denied;
        return;
    }

    /* Reserve a local maptrack slot before touching the remote table. */
    if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
    {
        rcu_unlock_domain(rd);
        gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
        op->status = GNTST_no_device_space;
        return;
    }

    spin_lock(&rd->grant_table->lock);

    /* Bounds check on the grant ref */
    if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
        PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    /* If already pinned, check the active domid and avoid refcnt overflow. */
    if ( act->pin &&
         ((act->domid != ld->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, ld->domain_id, act->pin);

    /* Need to claim (or upgrade to) access via the shared entry only if
     * not yet pinned, or when adding the first writable pin. */
    if ( !act->pin ||
         (!(op->flags & GNTMAP_readonly) &&
          !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /*
         * This loop attempts to set the access (reading/writing) flags
         * in the grant table entry.  It tries a cmpxchg on the field
         * up to five times, and then fails under the assumption that
         * the guest is misbehaving.
         */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != ld->domain_id)) )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                         scombo.shorts.flags, scombo.shorts.domid,
                         ld->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !(op->flags & GNTMAP_readonly) )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        /* First pin: latch granting domid and translated frame. */
        if ( !act->pin )
        {
            act->domid = scombo.shorts.domid;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    old_pin = act->pin;
    if ( op->flags & GNTMAP_device_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    frame = act->frame;

    cache_flags = (sha->flags & (GTF_PAT | GTF_PWT | GTF_PCD) );

    spin_unlock(&rd->grant_table->lock);

    /* Establish the actual mapping(s), outside the grant-table lock. */
    if ( is_iomem_page(frame) )
    {
        if ( !iomem_access_permitted(rd, frame, frame) )
        {
            gdprintk(XENLOG_WARNING,
                     "Iomem mapping not permitted %lx (domain %d)\n",
                     frame, rd->domain_id);
            rc = GNTST_general_error;
            goto undo_out;
        }

        rc = create_grant_host_mapping(
            op->host_addr, frame, op->flags, cache_flags);
        if ( rc != GNTST_okay )
            goto undo_out;
    }
    else
    {
        if ( unlikely(!mfn_valid(frame)) ||
             unlikely(!(gnttab_host_mapping_get_page_type(op, ld, rd) ?
                        get_page_and_type(mfn_to_page(frame), rd,
                                          PGT_writable_page) :
                        get_page(mfn_to_page(frame), rd))) )
        {
            if ( !rd->is_dying )
                gdprintk(XENLOG_WARNING, "Could not pin grant frame %lx\n",
                         frame);
            rc = GNTST_general_error;
            goto undo_out;
        }

        /* nr_gets records how many page refs the undo path must drop. */
        nr_gets++;
        if ( op->flags & GNTMAP_host_map )
        {
            rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0);
            if ( rc != GNTST_okay )
                goto undo_out;

            if ( op->flags & GNTMAP_device_map )
            {
                nr_gets++;
                (void)get_page(mfn_to_page(frame), rd);
                if ( !(op->flags & GNTMAP_readonly) )
                    get_page_type(mfn_to_page(frame), PGT_writable_page);
            }
        }
    }

    /* First writable pin on this entry: also map through the IOMMU. */
    if ( need_iommu(ld) &&
         !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
         (act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
    {
        if ( iommu_map_page(ld, mfn_to_gmfn(ld, frame), frame) )
        {
            rc = GNTST_general_error;
            goto undo_out;
        }
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);

    /* Success: record the mapping in the local maptrack table. */
    mt = &maptrack_entry(ld->grant_table, handle);
    mt->domid = op->dom;
    mt->ref = op->ref;
    mt->flags = op->flags;

    op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
    op->handle = handle;
    op->status = GNTST_okay;

    rcu_unlock_domain(rd);
    return;

 undo_out:
    /* Drop the page references taken above (mirrors the get_page logic),
     * then unwind the pin counts and shared-entry flags under the lock. */
    if ( nr_gets > 1 )
    {
        if ( !(op->flags & GNTMAP_readonly) )
            put_page_type(mfn_to_page(frame));
        put_page(mfn_to_page(frame));
    }
    if ( nr_gets > 0 )
    {
        if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
            put_page_type(mfn_to_page(frame));
        put_page(mfn_to_page(frame));
    }

    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    if ( op->flags & GNTMAP_device_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    if ( !(op->flags & GNTMAP_readonly) &&
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    op->status = rc;
    put_maptrack_handle(ld->grant_table, handle);
    rcu_unlock_domain(rd);
}
451 static long
452 gnttab_map_grant_ref(
453 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
454 {
455 int i;
456 struct gnttab_map_grant_ref op;
458 for ( i = 0; i < count; i++ )
459 {
460 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
461 return -EFAULT;
462 __gnttab_map_grant_ref(&op);
463 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
464 return -EFAULT;
465 }
467 return 0;
468 }
/*
 * First half of an unmap, shared by unmap_grant_ref and
 * unmap_and_replace: validate the maptrack handle, tear down the
 * host/device mapping and drop the remote entry's pin counts.  Page
 * reference counts are released later in
 * __gnttab_unmap_common_complete(), after the batched TLB flush.
 */
static void
__gnttab_unmap_common(
    struct gnttab_unmap_common *op)
{
    domid_t dom;
    struct domain *ld, *rd;
    struct active_grant_entry *act;
    grant_entry_t *sha;
    s16 rc = 0;
    u32 old_pin;

    ld = current->domain;

    op->frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);

    if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
    {
        gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    op->map = &maptrack_entry(ld->grant_table, op->handle);

    /* Zero flags means the handle is not currently in use. */
    if ( unlikely(!op->map->flags) )
    {
        gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

    dom = op->map->domid;
    op->flags = op->map->flags;

    if ( unlikely((op->rd = rd = rcu_lock_domain_by_id(dom)) == NULL) )
    {
        /* This can happen when a grant is implicitly unmapped. */
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
        domain_crash(ld); /* naughty... */
        return;
    }

    rc = xsm_grant_unmapref(ld, rd);
    if ( rc )
    {
        rcu_unlock_domain(rd);
        op->status = GNTST_permission_denied;
        return;
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);

    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->map->ref);
    sha = &shared_entry(rd->grant_table, op->map->ref);
    old_pin = act->pin;

    if ( op->frame == 0 )
    {
        op->frame = act->frame;
    }
    else
    {
        /* A caller-supplied bus address must name the granted frame. */
        if ( unlikely(op->frame != act->frame) )
            PIN_FAIL(unmap_out, GNTST_general_error,
                     "Bad frame number doesn't match gntref. (%lx != %lx)\n",
                     op->frame, act->frame);
        if ( op->flags & GNTMAP_device_map )
        {
            ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
            op->map->flags &= ~GNTMAP_device_map;
            if ( op->flags & GNTMAP_readonly )
                act->pin -= GNTPIN_devr_inc;
            else
                act->pin -= GNTPIN_devw_inc;
        }
    }

    if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
    {
        if ( (rc = replace_grant_host_mapping(op->host_addr,
                                              op->frame, op->new_addr,
                                              op->flags)) < 0 )
            goto unmap_out;

        ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
        op->map->flags &= ~GNTMAP_host_map;
        if ( op->flags & GNTMAP_readonly )
            act->pin -= GNTPIN_hstr_inc;
        else
            act->pin -= GNTPIN_hstw_inc;
    }

    /* Last writable pin just went away: drop the IOMMU mapping too. */
    if ( need_iommu(ld) &&
         (old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
    {
        if ( iommu_unmap_page(ld, mfn_to_gmfn(ld, op->frame)) )
        {
            rc = GNTST_general_error;
            goto unmap_out;
        }
    }

    /* If just unmapped a writable mapping, mark as dirtied */
    if ( !(op->flags & GNTMAP_readonly) )
        gnttab_mark_dirty(rd, op->frame);

 unmap_out:
    op->status = rc;
    spin_unlock(&rd->grant_table->lock);
    rcu_unlock_domain(rd);
}
/*
 * Second half of an unmap: runs after the batched TLB flush.  Releases
 * the page type/reference counts taken at map time, frees the maptrack
 * handle once neither map type remains, and clears GTF_writing /
 * GTF_reading in the shared entry when no pins remain.
 */
static void
__gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
{
    struct domain *ld, *rd;
    struct active_grant_entry *act;
    grant_entry_t *sha;

    rd = op->rd;

    if ( rd == NULL )
    {
        /*
         * Suggests that __gnttab_unmap_common failed in
         * rcu_lock_domain_by_id() or earlier, and so we have nothing
         * to complete
         */
        return;
    }

    ld = current->domain;

    rcu_lock_domain(rd);
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->map->ref);
    sha = &shared_entry(rd->grant_table, op->map->ref);

    if ( unlikely(op->frame != act->frame) )
    {
        /*
         * Suggests that __gnttab_unmap_common failed early and so
         * nothing further to do
         */
        goto unmap_out;
    }

    if ( op->flags & GNTMAP_device_map )
    {
        if ( !is_iomem_page(act->frame) )
        {
            if ( op->flags & GNTMAP_readonly )
                put_page(mfn_to_page(op->frame));
            else
                put_page_and_type(mfn_to_page(op->frame));
        }
    }

    if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
    {
        if ( op->status != 0 )
        {
            /*
             * Suggests that __gnttab_unmap_common failed in
             * replace_grant_host_mapping() so nothing further to do
             */
            goto unmap_out;
        }

        if ( !is_iomem_page(op->frame) )
        {
            if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
                put_page_type(mfn_to_page(op->frame));
            put_page(mfn_to_page(op->frame));
        }
    }

    /* Both map types gone: recycle the maptrack handle. */
    if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
    {
        op->map->flags = 0;
        put_maptrack_handle(ld->grant_table, op->handle);
    }

    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
         !(op->flags & GNTMAP_readonly) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( act->pin == 0 )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unmap_out:
    spin_unlock(&rd->grant_table->lock);
    rcu_unlock_domain(rd);
}
/* Marshal one gnttab_unmap_grant_ref request into the common structure
 * and run the first unmap phase; status is copied back to the request. */
static void
__gnttab_unmap_grant_ref(
    struct gnttab_unmap_grant_ref *op,
    struct gnttab_unmap_common *common)
{
    common->host_addr = op->host_addr;
    common->dev_bus_addr = op->dev_bus_addr;
    common->handle = op->handle;

    /* Initialise these in case common contains old state */
    common->new_addr = 0;
    common->rd = NULL;

    __gnttab_unmap_common(common);
    op->status = common->status;
}
/*
 * Hypercall entry point: unmap 'count' grant mappings in batches of
 * GNTTAB_UNMAP_BATCH_SIZE.  Each batch is torn down, a single TLB flush
 * is issued, and only then are page references released via
 * __gnttab_unmap_common_complete().
 */
static long
gnttab_unmap_grant_ref(
    XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
{
    int i, c, partial_done, done = 0;
    struct gnttab_unmap_grant_ref op;
    struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];

    while ( count != 0 )
    {
        c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
        partial_done = 0;

        for ( i = 0; i < c; i++ )
        {
            if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
                goto fault;
            __gnttab_unmap_grant_ref(&op, &(common[i]));
            ++partial_done;
            if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
                goto fault;
        }

        flush_tlb_mask(current->domain->domain_dirty_cpumask);

        for ( i = 0; i < partial_done; i++ )
            __gnttab_unmap_common_complete(&(common[i]));

        count -= c;
        done += c;
    }

    return 0;

fault:
    /* Still flush and complete whatever was unmapped before the fault. */
    flush_tlb_mask(current->domain->domain_dirty_cpumask);

    for ( i = 0; i < partial_done; i++ )
        __gnttab_unmap_common_complete(&(common[i]));
    return -EFAULT;
}
/* Marshal one gnttab_unmap_and_replace request into the common structure
 * and run the first unmap phase; status is copied back to the request. */
static void
__gnttab_unmap_and_replace(
    struct gnttab_unmap_and_replace *op,
    struct gnttab_unmap_common *common)
{
    common->host_addr = op->host_addr;
    common->new_addr = op->new_addr;
    common->handle = op->handle;

    /* Initialise these in case common contains old state */
    common->dev_bus_addr = 0;
    common->rd = NULL;

    __gnttab_unmap_common(common);
    op->status = common->status;
}
/*
 * Hypercall entry point: like gnttab_unmap_grant_ref(), but each mapping
 * is replaced with a new PTE (see replace_grant_host_mapping) rather than
 * simply removed.  Same batching and deferred-completion structure.
 */
static long
gnttab_unmap_and_replace(
    XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) uop, unsigned int count)
{
    int i, c, partial_done, done = 0;
    struct gnttab_unmap_and_replace op;
    struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];

    while ( count != 0 )
    {
        c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
        partial_done = 0;

        for ( i = 0; i < c; i++ )
        {
            if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
                goto fault;
            __gnttab_unmap_and_replace(&op, &(common[i]));
            ++partial_done;
            if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
                goto fault;
        }

        flush_tlb_mask(current->domain->domain_dirty_cpumask);

        for ( i = 0; i < partial_done; i++ )
            __gnttab_unmap_common_complete(&(common[i]));

        count -= c;
        done += c;
    }

    return 0;

fault:
    /* Still flush and complete whatever was unmapped before the fault. */
    flush_tlb_mask(current->domain->domain_dirty_cpumask);

    for ( i = 0; i < partial_done; i++ )
        __gnttab_unmap_common_complete(&(common[i]));
    return -EFAULT;
}
/*
 * Grow domain d's grant table to req_nr_frames shared frames (plus the
 * corresponding number of active frames).  Returns 1 on success, 0 on
 * allocation failure (in which case partial allocations are unwound).
 */
int
gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
{
    /* d's grant table lock must be held by the caller */

    struct grant_table *gt = d->grant_table;
    unsigned int i;

    ASSERT(req_nr_frames <= max_nr_grant_frames);

    gdprintk(XENLOG_INFO,
             "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
             d->domain_id, nr_grant_frames(gt), req_nr_frames);

    /* Active */
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
            goto active_alloc_failed;
        clear_page(gt->active[i]);
    }

    /* Shared */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        if ( (gt->shared[i] = alloc_xenheap_page()) == NULL )
            goto shared_alloc_failed;
        clear_page(gt->shared[i]);
    }

    /* Share the new shared frames with the recipient domain */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
        gnttab_create_shared_page(d, gt, i);

    /* Commit the new size only once everything is in place. */
    gt->nr_grant_frames = req_nr_frames;

    return 1;

 shared_alloc_failed:
    /* Unwind partial allocations made above. */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        free_xenheap_page(gt->shared[i]);
        gt->shared[i] = NULL;
    }
 active_alloc_failed:
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        free_xenheap_page(gt->active[i]);
        gt->active[i] = NULL;
    }
    gdprintk(XENLOG_INFO, "Allocation failure when expanding grant table.\n");
    return 0;
}
/*
 * GNTTABOP_setup_table: grow the target domain's grant table to
 * op.nr_frames (if needed) and return the list of shared-frame GMFNs to
 * the caller.  'count' must be 1.  Result is reported via op.status;
 * -EFAULT only for guest-memory access failures.
 */
static long
gnttab_setup_table(
    XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
{
    struct gnttab_setup_table op;
    struct domain *d;
    int i;
    unsigned long gmfn;
    domid_t dom;

    if ( count != 1 )
        return -EINVAL;

    if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
    {
        gdprintk(XENLOG_INFO, "Fault while reading gnttab_setup_table_t.\n");
        return -EFAULT;
    }

    if ( unlikely(op.nr_frames > max_nr_grant_frames) )
    {
        gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
                 " per domain.\n",
                 max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto out1;
    }

    dom = op.dom;
    if ( dom == DOMID_SELF )
    {
        d = rcu_lock_current_domain();
    }
    else
    {
        /* Acting on another domain requires privilege over it. */
        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
        {
            gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
            op.status = GNTST_bad_domain;
            goto out1;
        }

        if ( unlikely(!IS_PRIV_FOR(current->domain, d)) )
        {
            op.status = GNTST_permission_denied;
            goto out2;
        }
    }

    if ( xsm_grant_setup(current->domain, d) )
    {
        op.status = GNTST_permission_denied;
        goto out2;
    }

    spin_lock(&d->grant_table->lock);

    if ( (op.nr_frames > nr_grant_frames(d->grant_table)) &&
         !gnttab_grow_table(d, op.nr_frames) )
    {
        gdprintk(XENLOG_INFO,
                 "Expand grant table to %d failed. Current: %d Max: %d.\n",
                 op.nr_frames,
                 nr_grant_frames(d->grant_table),
                 max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto out3;
    }

    op.status = GNTST_okay;
    for ( i = 0; i < op.nr_frames; i++ )
    {
        gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
        /* Copy failures here are intentionally ignored. */
        (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
    }

 out3:
    spin_unlock(&d->grant_table->lock);
 out2:
    rcu_unlock_domain(d);
 out1:
    if ( unlikely(copy_to_guest(uop, &op, 1)) )
        return -EFAULT;

    return 0;
}
/*
 * GNTTABOP_query_size: report the current and maximum number of grant
 * frames for the target domain.  'count' must be 1.  Result is reported
 * via op.status; -EFAULT only for guest-memory access failures.
 */
static long
gnttab_query_size(
    XEN_GUEST_HANDLE(gnttab_query_size_t) uop, unsigned int count)
{
    struct gnttab_query_size op;
    struct domain *d;
    domid_t dom;
    int rc;

    if ( count != 1 )
        return -EINVAL;

    if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
    {
        gdprintk(XENLOG_INFO, "Fault while reading gnttab_query_size_t.\n");
        return -EFAULT;
    }

    dom = op.dom;
    if ( dom == DOMID_SELF )
    {
        d = rcu_lock_current_domain();
    }
    else
    {
        /* Querying another domain requires privilege over it. */
        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
        {
            gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
            op.status = GNTST_bad_domain;
            goto query_out;
        }

        if ( unlikely(!IS_PRIV_FOR(current->domain, d)) )
        {
            op.status = GNTST_permission_denied;
            goto query_out_unlock;
        }
    }

    rc = xsm_grant_query_size(current->domain, d);
    if ( rc )
    {
        op.status = GNTST_permission_denied;
        goto query_out_unlock;
    }

    spin_lock(&d->grant_table->lock);

    op.nr_frames     = nr_grant_frames(d->grant_table);
    op.max_nr_frames = max_nr_grant_frames;
    op.status        = GNTST_okay;

    spin_unlock(&d->grant_table->lock);

 query_out_unlock:
    rcu_unlock_domain(d);

 query_out:
    if ( unlikely(copy_to_guest(uop, &op, 1)) )
        return -EFAULT;

    return 0;
}
996 /*
997 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
998 * ownership of a page frame. If so, lock down the grant entry.
999 */
1000 static int
1001 gnttab_prepare_for_transfer(
1002 struct domain *rd, struct domain *ld, grant_ref_t ref)
1004 struct grant_table *rgt;
1005 struct grant_entry *sha;
1006 union grant_combo scombo, prev_scombo, new_scombo;
1007 int retries = 0;
1009 if ( unlikely((rgt = rd->grant_table) == NULL) )
1011 gdprintk(XENLOG_INFO, "Dom %d has no grant table.\n", rd->domain_id);
1012 return 0;
1015 spin_lock(&rgt->lock);
1017 if ( unlikely(ref >= nr_grant_entries(rd->grant_table)) )
1019 gdprintk(XENLOG_INFO,
1020 "Bad grant reference (%d) for transfer to domain(%d).\n",
1021 ref, rd->domain_id);
1022 goto fail;
1025 sha = &shared_entry(rgt, ref);
1027 scombo.word = *(u32 *)&sha->flags;
1029 for ( ; ; )
1031 if ( unlikely(scombo.shorts.flags != GTF_accept_transfer) ||
1032 unlikely(scombo.shorts.domid != ld->domain_id) )
1034 gdprintk(XENLOG_INFO, "Bad flags (%x) or dom (%d). "
1035 "(NB. expected dom %d)\n",
1036 scombo.shorts.flags, scombo.shorts.domid,
1037 ld->domain_id);
1038 goto fail;
1041 new_scombo = scombo;
1042 new_scombo.shorts.flags |= GTF_transfer_committed;
1044 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
1045 scombo.word, new_scombo.word);
1046 if ( likely(prev_scombo.word == scombo.word) )
1047 break;
1049 if ( retries++ == 4 )
1051 gdprintk(XENLOG_WARNING, "Shared grant entry is unstable.\n");
1052 goto fail;
1055 scombo = prev_scombo;
1058 spin_unlock(&rgt->lock);
1059 return 1;
1061 fail:
1062 spin_unlock(&rgt->lock);
1063 return 0;
1066 static long
1067 gnttab_transfer(
1068 XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
1070 struct domain *d = current->domain;
1071 struct domain *e;
1072 struct page_info *page;
1073 int i;
1074 grant_entry_t *sha;
1075 struct gnttab_transfer gop;
1076 unsigned long mfn;
1077 unsigned int max_bitsize;
1079 for ( i = 0; i < count; i++ )
1081 /* Read from caller address space. */
1082 if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
1084 gdprintk(XENLOG_INFO, "gnttab_transfer: error reading req %d/%d\n",
1085 i, count);
1086 return -EFAULT;
1089 mfn = gmfn_to_mfn(d, gop.mfn);
1091 /* Check the passed page frame for basic validity. */
1092 if ( unlikely(!mfn_valid(mfn)) )
1094 gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
1095 (unsigned long)gop.mfn);
1096 gop.status = GNTST_bad_page;
1097 goto copyback;
1100 page = mfn_to_page(mfn);
1101 if ( unlikely(is_xen_heap_page(page)) )
1103 gdprintk(XENLOG_INFO, "gnttab_transfer: xen frame %lx\n",
1104 (unsigned long)gop.mfn);
1105 gop.status = GNTST_bad_page;
1106 goto copyback;
1109 if ( steal_page(d, page, 0) < 0 )
1111 gop.status = GNTST_bad_page;
1112 goto copyback;
1115 #ifndef __ia64__ /* IA64 implicitly replaces the old page in steal_page(). */
1116 guest_physmap_remove_page(d, gop.mfn, mfn, 0);
1117 #endif
1118 flush_tlb_mask(d->domain_dirty_cpumask);
1120 /* Find the target domain. */
1121 if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
1123 gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
1124 gop.domid);
1125 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1126 free_domheap_page(page);
1127 gop.status = GNTST_bad_domain;
1128 goto copyback;
1131 if ( xsm_grant_transfer(d, e) )
1133 gop.status = GNTST_permission_denied;
1134 unlock_and_copyback:
1135 rcu_unlock_domain(e);
1136 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1137 free_domheap_page(page);
1138 goto copyback;
1141 max_bitsize = domain_clamp_alloc_bitsize(
1142 e, BITS_PER_LONG+PAGE_SHIFT-1);
1143 if ( (1UL << (max_bitsize - PAGE_SHIFT)) <= mfn )
1145 struct page_info *new_page;
1146 void *sp, *dp;
1148 new_page = alloc_domheap_page(NULL, MEMF_bits(max_bitsize));
1149 if ( new_page == NULL )
1151 gop.status = GNTST_address_too_big;
1152 goto unlock_and_copyback;
1155 sp = map_domain_page(mfn);
1156 dp = map_domain_page(page_to_mfn(new_page));
1157 memcpy(dp, sp, PAGE_SIZE);
1158 unmap_domain_page(dp);
1159 unmap_domain_page(sp);
1161 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1162 free_domheap_page(page);
1163 page = new_page;
1166 spin_lock(&e->page_alloc_lock);
1168 /*
1169 * Check that 'e' will accept the page and has reservation
1170 * headroom. Also, a domain mustn't have PGC_allocated
1171 * pages when it is dying.
1172 */
1173 if ( unlikely(e->is_dying) ||
1174 unlikely(e->tot_pages >= e->max_pages) ||
1175 unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
1177 if ( !e->is_dying )
1178 gdprintk(XENLOG_INFO, "gnttab_transfer: "
1179 "Transferee has no reservation "
1180 "headroom (%d,%d) or provided a bad grant ref (%08x) "
1181 "or is dying (%d)\n",
1182 e->tot_pages, e->max_pages, gop.ref, e->is_dying);
1183 spin_unlock(&e->page_alloc_lock);
1184 rcu_unlock_domain(e);
1185 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1186 free_domheap_page(page);
1187 gop.status = GNTST_general_error;
1188 goto copyback;
1191 /* Okay, add the page to 'e'. */
1192 if ( unlikely(e->tot_pages++ == 0) )
1193 get_knownalive_domain(e);
1194 list_add_tail(&page->list, &e->page_list);
1195 page_set_owner(page, e);
1197 spin_unlock(&e->page_alloc_lock);
1199 TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
1201 /* Tell the guest about its new page frame. */
1202 spin_lock(&e->grant_table->lock);
1204 sha = &shared_entry(e->grant_table, gop.ref);
1205 guest_physmap_add_page(e, sha->frame, mfn, 0);
1206 sha->frame = mfn;
1207 wmb();
1208 sha->flags |= GTF_transfer_completed;
1210 spin_unlock(&e->grant_table->lock);
1212 rcu_unlock_domain(e);
1214 gop.status = GNTST_okay;
1216 copyback:
1217 if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
1219 gdprintk(XENLOG_INFO, "gnttab_transfer: error writing resp "
1220 "%d/%d\n", i, count);
1221 return -EFAULT;
1225 return 0;
/* Undo __acquire_grant_for_copy. Again, this has no effect on page
   type and reference counts. */
static void
__release_grant_for_copy(
    struct domain *rd, unsigned long gref, int readonly)
{
    grant_entry_t *sha;
    struct active_grant_entry *act;
    unsigned long r_frame;

    /* All pin/flag manipulation happens under the remote table lock. */
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, gref);
    sha = &shared_entry(rd->grant_table, gref);
    r_frame = act->frame;

    if ( readonly )
    {
        /* Drop the host-read pin taken by __acquire_grant_for_copy. */
        act->pin -= GNTPIN_hstr_inc;
    }
    else
    {
        /* The copy may have written to the frame; log-dirty must see it. */
        gnttab_mark_dirty(rd, r_frame);

        act->pin -= GNTPIN_hstw_inc;
        /* Last writable pin gone: let the granter see writing has ceased. */
        if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) )
            gnttab_clear_flag(_GTF_writing, &sha->flags);
    }

    /* Last pin of any kind gone: clear the reading flag too. */
    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

    spin_unlock(&rd->grant_table->lock);
}
/* Grab a frame number from a grant entry and update the flags and pin
   count as appropriate. Note that this does *not* update the page
   type or reference counts, and does not check that the mfn is
   actually valid.
   On success (*frame) holds the machine frame and the active entry is
   pinned (hstr for read-only, hstw for writable); balance with
   __release_grant_for_copy. Returns a GNTST_* status. */
static int
__acquire_grant_for_copy(
    struct domain *rd, unsigned long gref, int readonly,
    unsigned long *frame)
{
    grant_entry_t *sha;
    struct active_grant_entry *act;
    s16 rc = GNTST_okay;
    int retries = 0;
    union grant_combo scombo, prev_scombo, new_scombo;

    spin_lock(&rd->grant_table->lock);

    if ( unlikely(gref >= nr_grant_entries(rd->grant_table)) )
        PIN_FAIL(unlock_out, GNTST_bad_gntref,
                 "Bad grant reference %ld\n", gref);

    act = &active_entry(rd->grant_table, gref);
    sha = &shared_entry(rd->grant_table, gref);

    /* If already pinned, check the active domid and avoid refcnt overflow.
       0x80808080 tests the top bit of each of the four 8-bit pin counts. */
    if ( act->pin &&
         ((act->domid != current->domain->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, current->domain->domain_id, act->pin);

    /* Only touch the shared flags if this is the first pin, or we are
       upgrading a read-only pin set to include a writable pin. */
    if ( !act->pin ||
         (!readonly && !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /* cmpxchg loop: atomically set GTF_reading (and GTF_writing for a
           writable pin) against concurrent updates by the granting guest. */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != current->domain->domain_id)) )
                 PIN_FAIL(unlock_out, GNTST_general_error,
                          "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                          scombo.shorts.flags, scombo.shorts.domid,
                          current->domain->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !readonly )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            /* Give up if the guest keeps racing us on the entry. */
            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        /* First pin: cache the granter and translated frame in the
           active entry so later pins need not re-read the shared entry. */
        if ( !act->pin )
        {
            act->domid = scombo.shorts.domid;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    *frame = act->frame;

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    return rc;
}
/*
 * Perform a single GNTTABOP_copy operation: copy op->len bytes from the
 * source page/offset to the destination page/offset. Either side may be
 * named by grant reference (with the owning domain's permission) or, for
 * DOMID_SELF only, directly by gmfn. The result is written to op->status;
 * all acquired grants, page references and domain locks are released on
 * every exit path via the error_out cleanup ladder.
 */
static void
__gnttab_copy(
    struct gnttab_copy *op)
{
    struct domain *sd = NULL, *dd = NULL;
    unsigned long s_frame, d_frame;
    char *sp, *dp;
    s16 rc = GNTST_okay;
    /* Track which resources were acquired so error_out undoes exactly those. */
    int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
    int src_is_gref, dest_is_gref;

    /* The copy must lie entirely within one page on each side. */
    if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
         ((op->dest.offset + op->len) > PAGE_SIZE) )
        PIN_FAIL(error_out, GNTST_bad_copy_arg, "copy beyond page area.\n");

    src_is_gref = op->flags & GNTCOPY_source_gref;
    dest_is_gref = op->flags & GNTCOPY_dest_gref;

    /* Raw-mfn addressing is only permitted on the caller's own pages. */
    if ( (op->source.domid != DOMID_SELF && !src_is_gref ) ||
         (op->dest.domid != DOMID_SELF && !dest_is_gref) )
        PIN_FAIL(error_out, GNTST_permission_denied,
                 "only allow copy-by-mfn for DOMID_SELF.\n");

    if ( op->source.domid == DOMID_SELF )
        sd = rcu_lock_current_domain();
    else if ( (sd = rcu_lock_domain_by_id(op->source.domid)) == NULL )
        PIN_FAIL(error_out, GNTST_bad_domain,
                 "couldn't find %d\n", op->source.domid);

    if ( op->dest.domid == DOMID_SELF )
        dd = rcu_lock_current_domain();
    else if ( (dd = rcu_lock_domain_by_id(op->dest.domid)) == NULL )
        PIN_FAIL(error_out, GNTST_bad_domain,
                 "couldn't find %d\n", op->dest.domid);

    /* Security-module hook may veto the copy between these two domains. */
    rc = xsm_grant_copy(sd, dd);
    if ( rc )
    {
        rc = GNTST_permission_denied;
        goto error_out;
    }

    /* Resolve the source frame: read-only grant pin, or direct gmfn. */
    if ( src_is_gref )
    {
        rc = __acquire_grant_for_copy(sd, op->source.u.ref, 1, &s_frame);
        if ( rc != GNTST_okay )
            goto error_out;
        have_s_grant = 1;
    }
    else
    {
        s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
    }
    if ( unlikely(!mfn_valid(s_frame)) )
        PIN_FAIL(error_out, GNTST_general_error,
                 "source frame %lx invalid.\n", s_frame);
    if ( !get_page(mfn_to_page(s_frame), sd) )
    {
        if ( !sd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
        rc = GNTST_general_error;
        goto error_out;
    }
    have_s_ref = 1;

    /* Resolve the destination frame: writable grant pin, or direct gmfn. */
    if ( dest_is_gref )
    {
        rc = __acquire_grant_for_copy(dd, op->dest.u.ref, 0, &d_frame);
        if ( rc != GNTST_okay )
            goto error_out;
        have_d_grant = 1;
    }
    else
    {
        d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
    }
    if ( unlikely(!mfn_valid(d_frame)) )
        PIN_FAIL(error_out, GNTST_general_error,
                 "destination frame %lx invalid.\n", d_frame);
    /* Writable type ref: ensures nobody holds the page as e.g. a pagetable. */
    if ( !get_page_and_type(mfn_to_page(d_frame), dd, PGT_writable_page) )
    {
        if ( !dd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
        rc = GNTST_general_error;
        goto error_out;
    }

    sp = map_domain_page(s_frame);
    dp = map_domain_page(d_frame);

    memcpy(dp + op->dest.offset, sp + op->source.offset, op->len);

    unmap_domain_page(dp);
    unmap_domain_page(sp);

    /* Destination was written: record it for log-dirty tracking. */
    gnttab_mark_dirty(dd, d_frame);

    put_page_and_type(mfn_to_page(d_frame));
 error_out:
    if ( have_s_ref )
        put_page(mfn_to_page(s_frame));
    if ( have_s_grant )
        __release_grant_for_copy(sd, op->source.u.ref, 1);
    if ( have_d_grant )
        __release_grant_for_copy(dd, op->dest.u.ref, 0);
    if ( sd )
        rcu_unlock_domain(sd);
    if ( dd )
        rcu_unlock_domain(dd);
    op->status = rc;
}
1463 static long
1464 gnttab_copy(
1465 XEN_GUEST_HANDLE(gnttab_copy_t) uop, unsigned int count)
1467 int i;
1468 struct gnttab_copy op;
1470 for ( i = 0; i < count; i++ )
1472 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
1473 return -EFAULT;
1474 __gnttab_copy(&op);
1475 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
1476 return -EFAULT;
1478 return 0;
/*
 * Grant-table hypercall entry point. Validates the batch size, takes the
 * calling domain's big lock for the duration, verifies the guest handle
 * covers 'count' elements, and dispatches to the per-command handler.
 * Returns 0/positive on success, -EINVAL for oversized batches, -EFAULT
 * for bad guest handles, -ENOSYS for unknown/unsupported commands, or the
 * handler's own error code.
 */
long
do_grant_table_op(
    unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
{
    long rc;
    struct domain *d = current->domain;

    /* Cap batch size to bound time spent inside this hypercall. */
    if ( count > 512 )
        return -EINVAL;

    domain_lock(d);

    /* Default: bad guest handle unless a case sets rc otherwise. */
    rc = -EFAULT;
    switch ( cmd )
    {
    case GNTTABOP_map_grant_ref:
    {
        XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
            guest_handle_cast(uop, gnttab_map_grant_ref_t);
        if ( unlikely(!guest_handle_okay(map, count)) )
            goto out;
        rc = gnttab_map_grant_ref(map, count);
        break;
    }
    case GNTTABOP_unmap_grant_ref:
    {
        XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
            guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
        if ( unlikely(!guest_handle_okay(unmap, count)) )
            goto out;
        rc = gnttab_unmap_grant_ref(unmap, count);
        break;
    }
    case GNTTABOP_unmap_and_replace:
    {
        XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) unmap =
            guest_handle_cast(uop, gnttab_unmap_and_replace_t);
        if ( unlikely(!guest_handle_okay(unmap, count)) )
            goto out;
        /* Arch-dependent: some architectures cannot replace mappings. */
        rc = -ENOSYS;
        if ( unlikely(!replace_grant_supported()) )
            goto out;
        rc = gnttab_unmap_and_replace(unmap, count);
        break;
    }
    case GNTTABOP_setup_table:
    {
        rc = gnttab_setup_table(
            guest_handle_cast(uop, gnttab_setup_table_t), count);
        break;
    }
    case GNTTABOP_transfer:
    {
        XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
            guest_handle_cast(uop, gnttab_transfer_t);
        if ( unlikely(!guest_handle_okay(transfer, count)) )
            goto out;
        rc = gnttab_transfer(transfer, count);
        break;
    }
    case GNTTABOP_copy:
    {
        XEN_GUEST_HANDLE(gnttab_copy_t) copy =
            guest_handle_cast(uop, gnttab_copy_t);
        if ( unlikely(!guest_handle_okay(copy, count)) )
            goto out;
        rc = gnttab_copy(copy, count);
        break;
    }
    case GNTTABOP_query_size:
    {
        rc = gnttab_query_size(
            guest_handle_cast(uop, gnttab_query_size_t), count);
        break;
    }
    default:
        rc = -ENOSYS;
        break;
    }

  out:
    domain_unlock(d);

    return rc;
}
1567 #ifdef CONFIG_COMPAT
1568 #include "compat/grant_table.c"
1569 #endif
1571 static unsigned int max_nr_active_grant_frames(void)
1573 return (((max_nr_grant_frames * (PAGE_SIZE / sizeof(grant_entry_t))) +
1574 ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
1575 / (PAGE_SIZE / sizeof(struct active_grant_entry)));
/*
 * Allocate and initialise a domain's grant table: the active-entry
 * array, the maptrack (foreign-mapping tracking) array with its free
 * list, and the shared grant-entry frames exposed to the guest. On
 * success the table is installed in d->grant_table and 0 is returned;
 * on allocation failure everything allocated so far is unwound via the
 * no_mem_* ladder and -ENOMEM is returned.
 */
int
grant_table_create(
    struct domain *d)
{
    struct grant_table *t;
    int                 i;

    /* If this sizeof assertion fails, fix the function: shared_index */
    ASSERT(sizeof(grant_entry_t) == 8);

    if ( (t = xmalloc(struct grant_table)) == NULL )
        goto no_mem_0;

    /* Simple stuff. */
    memset(t, 0, sizeof(*t));
    spin_lock_init(&t->lock);
    t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;

    /* Active grant table. */
    if ( (t->active = xmalloc_array(struct active_grant_entry *,
                                    max_nr_active_grant_frames())) == NULL )
        goto no_mem_1;
    memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
    for ( i = 0;
          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
    {
        if ( (t->active[i] = alloc_xenheap_page()) == NULL )
            goto no_mem_2;
        clear_page(t->active[i]);
    }

    /* Tracking of mapped foreign frames table */
    if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
                                      max_nr_maptrack_frames())) == NULL )
        goto no_mem_2;
    memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
    if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
        goto no_mem_3;
    clear_page(t->maptrack[0]);
    t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
    /* Thread the maptrack entries into a free list: each entry's 'ref'
       field points at the next free handle. */
    for ( i = 0; i < t->maptrack_limit; i++ )
        t->maptrack[0][i].ref = i+1;

    /* Shared grant table. */
    if ( (t->shared = xmalloc_array(struct grant_entry *,
                                    max_nr_grant_frames)) == NULL )
        goto no_mem_3;
    memset(t->shared, 0, max_nr_grant_frames * sizeof(t->shared[0]));
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
    {
        if ( (t->shared[i] = alloc_xenheap_page()) == NULL )
            goto no_mem_4;
        clear_page(t->shared[i]);
    }

    /* Make the shared frames mappable by the guest (arch-specific). */
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
        gnttab_create_shared_page(d, t, i);

    /* Okay, install the structure. */
    d->grant_table = t;
    return 0;

    /* Unwind in reverse order of allocation. NOTE(review): the frame-free
       loops rely on free_xenheap_page() tolerating NULL for slots whose
       allocation failed partway — confirm against page_alloc.c. */
 no_mem_4:
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
        free_xenheap_page(t->shared[i]);
    xfree(t->shared);
 no_mem_3:
    free_xenheap_page(t->maptrack[0]);
    xfree(t->maptrack);
 no_mem_2:
    for ( i = 0;
          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
        free_xenheap_page(t->active[i]);
    xfree(t->active);
 no_mem_1:
    xfree(t);
 no_mem_0:
    return -ENOMEM;
}
/*
 * Called while a domain is dying: walk its maptrack table and tear down
 * every outstanding grant mapping it still holds, dropping the device/host,
 * read/write pins on the granting domain's active entries and releasing the
 * corresponding page references. Clears GTF_writing/GTF_reading in the
 * granter's shared entry once the last relevant pin is gone.
 */
void
gnttab_release_mappings(
    struct domain *d)
{
    struct grant_table   *gt = d->grant_table;
    struct grant_mapping *map;
    grant_ref_t           ref;
    grant_handle_t        handle;
    struct domain        *rd;
    struct active_grant_entry *act;
    struct grant_entry   *sha;

    /* Only legal during domain teardown; new mappings cannot appear. */
    BUG_ON(!d->is_dying);

    for ( handle = 0; handle < gt->maptrack_limit; handle++ )
    {
        map = &maptrack_entry(gt, handle);
        /* Skip handles with no live device or host mapping. */
        if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
            continue;

        ref = map->ref;

        gdprintk(XENLOG_INFO, "Grant release (%hu) ref:(%hu) "
                 "flags:(%x) dom:(%hu)\n",
                 handle, ref, map->flags, map->domid);

        rd = rcu_lock_domain_by_id(map->domid);
        if ( rd == NULL )
        {
            /* Nothing to clear up... */
            map->flags = 0;
            continue;
        }

        spin_lock(&rd->grant_table->lock);

        act = &active_entry(rd->grant_table, ref);
        sha = &shared_entry(rd->grant_table, ref);

        if ( map->flags & GNTMAP_readonly )
        {
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devr_mask));
                act->pin -= GNTPIN_devr_inc;
                /* Iomem pages carry no general refcount to drop. */
                if ( !is_iomem_page(act->frame) )
                    put_page(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstr_mask));
                act->pin -= GNTPIN_hstr_inc;
                if ( gnttab_release_host_mappings &&
                     !is_iomem_page(act->frame) )
                    put_page(mfn_to_page(act->frame));
            }
        }
        else
        {
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devw_mask));
                act->pin -= GNTPIN_devw_inc;
                /* Writable device mappings also held a type reference. */
                if ( !is_iomem_page(act->frame) )
                    put_page_and_type(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstw_mask));
                act->pin -= GNTPIN_hstw_inc;
                if ( gnttab_release_host_mappings &&
                     !is_iomem_page(act->frame) )
                {
                    /* Drop the type ref only if one was taken at map time. */
                    if ( gnttab_host_mapping_get_page_type(map, d, rd) )
                        put_page_type(mfn_to_page(act->frame));
                    put_page(mfn_to_page(act->frame));
                }
            }

            /* Last writable pin gone: tell the granter writing has ceased. */
            if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
                gnttab_clear_flag(_GTF_writing, &sha->flags);
        }

        if ( act->pin == 0 )
            gnttab_clear_flag(_GTF_reading, &sha->flags);

        spin_unlock(&rd->grant_table->lock);

        rcu_unlock_domain(rd);

        map->flags = 0;
    }
}
1755 void
1756 grant_table_destroy(
1757 struct domain *d)
1759 struct grant_table *t = d->grant_table;
1760 int i;
1762 if ( t == NULL )
1763 return;
1765 for ( i = 0; i < nr_grant_frames(t); i++ )
1766 free_xenheap_page(t->shared[i]);
1767 xfree(t->shared);
1769 for ( i = 0; i < nr_maptrack_frames(t); i++ )
1770 free_xenheap_page(t->maptrack[i]);
1771 xfree(t->maptrack);
1773 for ( i = 0; i < nr_active_grant_frames(t); i++ )
1774 free_xenheap_page(t->active[i]);
1775 xfree(t->active);
1777 xfree(t);
1778 d->grant_table = NULL;
1781 /*
1782 * Local variables:
1783 * mode: C
1784 * c-set-style: "BSD"
1785 * c-basic-offset: 4
1786 * tab-width: 4
1787 * indent-tabs-mode: nil
1788 * End:
1789 */