ia64/xen-unstable

xen/include/xen/domain_page.h @ 19800:78962f85c562

IOMMU: Add two generic functions to vendor neutral interface

Add two generic functions to the vendor-neutral IOMMU interface. Since
changeset 19732, a single global flag, "iommu_enabled", controls IOMMU
enablement on both VT-d and AMD systems, so distinct code paths are needed
for VT-d and AMD IOMMUs once the flag is turned on. Also remove the early
check of "iommu_enabled" in iommu_setup(), so that IOMMU functionality is
not disabled on AMD systems.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 19 08:41:50 2009 +0100 (2009-06-19)
parents 9bbb54fd9181
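
The rationale above implies a common dispatch shape: a generic, vendor-neutral
entry point gates on the single "iommu_enabled" flag and then branches to the
VT-d or AMD implementation. The sketch below illustrates that shape only; the
function and helper names are placeholders, not the code from this changeset.

    /* Illustrative only: every name below except iommu_enabled is made up. */
    extern int iommu_enabled;        /* single global flag (changeset 19732) */
    extern int running_on_amd(void); /* placeholder vendor check */
    extern int amd_iommu_do(void);   /* placeholder AMD code path */
    extern int vtd_iommu_do(void);   /* placeholder VT-d code path */

    int iommu_do(void)
    {
        if ( !iommu_enabled )
            return 0;
        return running_on_amd() ? amd_iommu_do() : vtd_iommu_do();
    }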
/******************************************************************************
 * domain_page.h
 *
 * Allow temporary mapping of domain page frames into Xen space.
 *
 * Copyright (c) 2003-2006, Keir Fraser <keir@xensource.com>
 */

#ifndef __XEN_DOMAIN_PAGE_H__
#define __XEN_DOMAIN_PAGE_H__

#include <xen/config.h>
#include <xen/mm.h>

#ifdef CONFIG_DOMAIN_PAGE

/*
 * Map a given page frame, returning the mapped virtual address. The page is
 * then accessible within the current VCPU until a corresponding unmap call.
 */
void *map_domain_page(unsigned long mfn);

/*
 * Pass a VA within a page previously mapped in the context of the
 * currently-executing VCPU via a call to map_domain_page().
 */
void unmap_domain_page(const void *va);
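
As a usage sketch (hypothetical caller, assuming a valid MFN and the usual
Xen environment providing memset() and PAGE_SIZE):

    /* Hypothetical example: zero one domain page frame. */
    static void zero_domain_frame(unsigned long mfn)
    {
        void *va = map_domain_page(mfn); /* mapped for the current VCPU */
        memset(va, 0, PAGE_SIZE);        /* frame is addressable until unmap */
        unmap_domain_page(va);           /* release the temporary mapping */
    }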

/*
 * Similar to the above calls, except the mapping is accessible in all
 * address spaces (not just within the VCPU that created the mapping). Global
 * mappings can also be unmapped from any context.
 */
void *map_domain_page_global(unsigned long mfn);
void unmap_domain_page_global(const void *va);
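
A sketch of the global variant (hypothetical code): the mapping outlives the
mapping VCPU's context, so it can be stashed and torn down from anywhere.

    /* Hypothetical example: keep one frame mapped for use from any context. */
    static void *shared_va;

    static void attach_shared_frame(unsigned long mfn)
    {
        shared_va = map_domain_page_global(mfn); /* visible in all address spaces */
    }

    static void detach_shared_frame(void)
    {
        unmap_domain_page_global(shared_va);     /* legal from any context */
        shared_va = NULL;
    }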

/*
 * A one-entry cache of a map_domain_page() mapping. DMCACHE_ENTRY_VALID
 * means cache->va holds a live mapping of cache->mfn; DMCACHE_ENTRY_HELD
 * means the caller is currently using the cached mapping.
 */
#define DMCACHE_ENTRY_VALID 1U
#define DMCACHE_ENTRY_HELD  2U

struct domain_mmap_cache {
    unsigned long mfn;
    void         *va;
    unsigned int  flags;
};

static inline void
domain_mmap_cache_init(struct domain_mmap_cache *cache)
{
    ASSERT(cache != NULL);
    cache->flags = 0;
    cache->mfn = 0;
    cache->va = NULL;
}

static inline void *
map_domain_page_with_cache(unsigned long mfn, struct domain_mmap_cache *cache)
{
    ASSERT(cache != NULL);
    BUG_ON(cache->flags & DMCACHE_ENTRY_HELD);

    if ( likely(cache->flags & DMCACHE_ENTRY_VALID) )
    {
        cache->flags |= DMCACHE_ENTRY_HELD;
        if ( likely(mfn == cache->mfn) )
            goto done;
        unmap_domain_page(cache->va);
    }

    cache->mfn   = mfn;
    cache->va    = map_domain_page(mfn);
    cache->flags = DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID;

 done:
    return cache->va;
}

static inline void
unmap_domain_page_with_cache(const void *va, struct domain_mmap_cache *cache)
{
    ASSERT(cache != NULL);
    cache->flags &= ~DMCACHE_ENTRY_HELD;
}

static inline void
domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
{
    ASSERT(cache != NULL);
    BUG_ON(cache->flags & DMCACHE_ENTRY_HELD);

    if ( likely(cache->flags & DMCACHE_ENTRY_VALID) )
    {
        unmap_domain_page(cache->va);
        cache->flags = 0;
    }
}
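
Putting the cache API together (hypothetical caller): the cache makes repeated
maps of the same MFN nearly free, which pays off when consecutive iterations
touch the same frame.

    /* Hypothetical example: visit a run of frames via one cached mapping slot. */
    static void scan_frames(const unsigned long *mfns, unsigned int nr)
    {
        struct domain_mmap_cache cache;
        unsigned int i;

        domain_mmap_cache_init(&cache);
        for ( i = 0; i < nr; i++ )
        {
            void *va = map_domain_page_with_cache(mfns[i], &cache);
            /* ... read or modify the page at va ... */
            unmap_domain_page_with_cache(va, &cache);
        }
        domain_mmap_cache_destroy(&cache);
    }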

#else /* !CONFIG_DOMAIN_PAGE */

#define map_domain_page(mfn)                mfn_to_virt(mfn)
#define unmap_domain_page(va)               ((void)(va))

#define map_domain_page_global(mfn)         mfn_to_virt(mfn)
#define unmap_domain_page_global(va)        ((void)(va))

struct domain_mmap_cache {
};

#define domain_mmap_cache_init(c)           ((void)(c))
#define map_domain_page_with_cache(mfn,c)   (map_domain_page(mfn))
#define unmap_domain_page_with_cache(va,c)  ((void)(va))
#define domain_mmap_cache_destroy(c)        ((void)(c))

#endif /* !CONFIG_DOMAIN_PAGE */

#endif /* __XEN_DOMAIN_PAGE_H__ */