ia64/xen-unstable

view xen/include/xen/tmem_xen.h @ 19800:78962f85c562

IOMMU: Add two generic functions to vendor neutral interface

Add two generic functions to the vendor-neutral IOMMU interface. The
reason is that, as of changeset 19732, a single global flag
"iommu_enabled" controls IOMMU enablement for both VT-d and AMD
systems, so different code paths are needed for VT-d and AMD IOMMU
systems once this flag has been turned on. Also, the early check of
"iommu_enabled" in iommu_setup() is removed so that IOMMU
functionality is not disabled on AMD systems.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 19 08:41:50 2009 +0100 (2009-06-19)
parents 0ea75c3b7743
children
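
The two functions themselves are not shown on this page. Purely as an
illustration of the kind of vendor-neutral dispatch the description implies,
a sketch follows; every name below except iommu_enabled and iommu_setup() is
hypothetical and not taken from the changeset.

    /* Hypothetical sketch only: once the single global iommu_enabled flag is
     * set, a vendor-neutral entry point picks the VT-d or AMD path at run time. */
    int iommu_vendor_dispatch_setup(struct domain *d)
    {
        if ( !iommu_enabled )
            return 0;
        if ( platform_has_amd_iommu() )        /* hypothetical predicate */
            return amd_iommu_domain_setup(d);  /* hypothetical AMD path */
        return vtd_domain_setup(d);            /* hypothetical VT-d path */
    }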
/******************************************************************************
 * tmem_xen.h
 *
 * Xen-specific Transcendent memory
 *
 * Copyright (c) 2009, Dan Magenheimer, Oracle Corp.
 */

#ifndef __XEN_TMEM_XEN_H__
#define __XEN_TMEM_XEN_H__

#include <xen/config.h>
#include <xen/mm.h> /* heap alloc/free */
#include <xen/xmalloc.h> /* xmalloc/xfree */
#include <xen/sched.h> /* struct domain */
#include <xen/guest_access.h> /* copy_from_guest */
#include <xen/hash.h> /* hash_long */
#include <public/tmem.h>
#ifdef CONFIG_COMPAT
#include <compat/tmem.h>
#endif
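
/* Per-client (i.e. per-domain) host-dependent state: the owning domain and
 * the xmem pool used for that client's persistent allocations. */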
struct tmem_host_dependent_client {
    struct domain *domain;
    struct xmem_pool *persistent_pool;
};
typedef struct tmem_host_dependent_client tmh_client_t;

#define IS_PAGE_ALIGNED(addr) \
  ((void *)((((unsigned long)addr + (PAGE_SIZE - 1)) & PAGE_MASK)) == addr)
#define IS_VALID_PAGE(_pi)  ( mfn_valid(page_to_mfn(_pi)) )

extern struct xmem_pool *tmh_mempool;
extern unsigned int tmh_mempool_maxalloc;
extern struct page_list_head tmh_page_list;
extern spinlock_t tmh_page_list_lock;
extern unsigned long tmh_page_list_pages;

extern spinlock_t tmem_lock;
extern spinlock_t tmem_spinlock;
extern rwlock_t tmem_rwlock;

extern void tmh_copy_page(char *to, char *from);
extern int tmh_init(void);
extern tmh_client_t *tmh_client_init(void);
extern void tmh_client_destroy(tmh_client_t *);
#define tmh_hash hash_long

extern void tmh_release_avail_pages_to_host(void);
extern void tmh_scrub_page(struct page_info *pi, unsigned int memflags);

extern int opt_tmem_compress;
static inline int tmh_compression_enabled(void)
{
    return opt_tmem_compress;
}

extern int opt_tmem;
static inline int tmh_enabled(void)
{
    return opt_tmem;
}

extern int opt_tmem_lock;

extern int opt_tmem_flush_dups;

/*
 * Memory free page list management
 */

static inline struct page_info *tmh_page_list_get(void)
{
    struct page_info *pi;

    spin_lock(&tmh_page_list_lock);
    if ( (pi = page_list_remove_head(&tmh_page_list)) != NULL )
        tmh_page_list_pages--;
    spin_unlock(&tmh_page_list_lock);
    ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
    return pi;
}

static inline void tmh_page_list_put(struct page_info *pi)
{
    ASSERT(IS_VALID_PAGE(pi));
    spin_lock(&tmh_page_list_lock);
    page_list_add(pi, &tmh_page_list);
    tmh_page_list_pages++;
    spin_unlock(&tmh_page_list_lock);
}

static inline unsigned long tmh_avail_pages(void)
{
    return tmh_page_list_pages;
}

/*
 * Ephemeral memory allocation for persistent data
 */

static inline bool_t domain_fully_allocated(struct domain *d)
{
    return ( d->tot_pages >= d->max_pages );
}
#define tmh_client_memory_fully_allocated(_pool) \
 domain_fully_allocated(_pool->client->tmh->domain)
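
/* Sub-page allocations for persistent data come from the client's private
 * xmem pool (plain xmalloc on i386, where the pool allocator is not used). */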
static inline void *_tmh_alloc_subpage_thispool(struct xmem_pool *cmem_mempool,
                                                size_t size, size_t align)
{
#if 0
    if ( d->tot_pages >= d->max_pages )
        return NULL;
#endif
#ifdef __i386__
    return _xmalloc(size,align);
#else
    ASSERT( size < tmh_mempool_maxalloc );
    if ( cmem_mempool == NULL )
        return NULL;
    return xmem_pool_alloc(size, cmem_mempool);
#endif
}
#define tmh_alloc_subpage_thispool(_pool, _s, _a) \
 _tmh_alloc_subpage_thispool(_pool->client->tmh->persistent_pool, \
                             _s, _a)

static inline void _tmh_free_subpage_thispool(struct xmem_pool *cmem_mempool,
                                              void *ptr, size_t size)
{
#ifdef __i386__
    xfree(ptr);
#else
    ASSERT( size < tmh_mempool_maxalloc );
    ASSERT( cmem_mempool != NULL );
    xmem_pool_free(ptr,cmem_mempool);
#endif
}
#define tmh_free_subpage_thispool(_pool, _p, _s) \
 _tmh_free_subpage_thispool(_pool->client->tmh->persistent_pool, _p, _s)

static inline struct page_info *_tmh_alloc_page_thispool(struct domain *d)
{
    struct page_info *pi;

    /* Note that this tot_pages check is not protected by d->page_alloc_lock,
     * so it may race and periodically fail in donate_page() or
     * alloc_domheap_pages(). That's OK; neither is a problem, though it can
     * be chatty if log_lvl is set. */
    if ( d->tot_pages >= d->max_pages )
        return NULL;

    if ( tmh_page_list_pages )
    {
        if ( (pi = tmh_page_list_get()) != NULL )
        {
            if ( donate_page(d,pi,0) == 0 )
                goto out;
            else
                tmh_page_list_put(pi);
        }
    }

    pi = alloc_domheap_pages(d,0,MEMF_tmem);

out:
    ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
    return pi;
}
#define tmh_alloc_page_thispool(_pool) \
    _tmh_alloc_page_thispool(_pool->client->tmh->domain)

static inline void _tmh_free_page_thispool(struct page_info *pi)
{
    struct domain *d = page_get_owner(pi);

    ASSERT(IS_VALID_PAGE(pi));
    if ( (d == NULL) || steal_page(d,pi,0) == 0 )
        tmh_page_list_put(pi);
    else
    {
        scrub_one_page(pi);
        ASSERT((pi->count_info & ~(PGC_allocated | 1)) == 0);
        free_domheap_pages(pi,0);
    }
}
#define tmh_free_page_thispool(_pool,_pg) \
    _tmh_free_page_thispool(_pg)

/*
 * Memory allocation for ephemeral (non-persistent) data
 */

static inline void *tmh_alloc_subpage(void *pool, size_t size,
                                      size_t align)
{
#ifdef __i386__
    ASSERT( size < PAGE_SIZE );
    return _xmalloc(size, align);
#else
    ASSERT( size < tmh_mempool_maxalloc );
    ASSERT( tmh_mempool != NULL );
    return xmem_pool_alloc(size, tmh_mempool);
#endif
}

static inline void tmh_free_subpage(void *ptr, size_t size)
{
#ifdef __i386__
    ASSERT( size < PAGE_SIZE );
    xfree(ptr);
#else
    ASSERT( size < tmh_mempool_maxalloc );
    xmem_pool_free(ptr,tmh_mempool);
#endif
}

static inline struct page_info *tmh_alloc_page(void *pool, int no_heap)
{
    struct page_info *pi = tmh_page_list_get();

    if ( pi == NULL && !no_heap )
        pi = alloc_domheap_pages(0,0,MEMF_tmem);
    ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
    return pi;
}

static inline void tmh_free_page(struct page_info *pi)
{
    ASSERT(IS_VALID_PAGE(pi));
    tmh_page_list_put(pi);
}

static inline unsigned int tmem_subpage_maxsize(void)
{
    return tmh_mempool_maxalloc;
}

#define tmh_lock_all  opt_tmem_lock
#define tmh_flush_dups  opt_tmem_flush_dups
#define tmh_called_from_tmem(_memflags) (_memflags & MEMF_tmem)

/* "Client" (==domain) abstraction */

struct client;
typedef domid_t cli_id_t;
typedef struct domain tmh_cli_ptr_t;
typedef struct page_info pfp_t;

/* this appears to be unreliable when a domain is being shut down */
static inline struct client *tmh_client_from_cli_id(cli_id_t cli_id)
{
    struct domain *d = get_domain_by_id(cli_id);
    if (d == NULL)
        return NULL;
    return (struct client *)(d->tmem);
}

static inline struct client *tmh_client_from_current(void)
{
    return (struct client *)(current->domain->tmem);
}

static inline cli_id_t tmh_get_cli_id_from_current(void)
{
    return current->domain->domain_id;
}

static inline tmh_cli_ptr_t *tmh_get_cli_ptr_from_current(void)
{
    return current->domain;
}

static inline void tmh_set_current_client(struct client *client)
{
    current->domain->tmem = client;
}

static inline bool_t tmh_current_is_privileged(void)
{
    return IS_PRIV(current->domain);
}

/* these typedefs are in the public/tmem.h interface
typedef XEN_GUEST_HANDLE(void) cli_mfn_t;
typedef XEN_GUEST_HANDLE(char) cli_va_t;
*/
typedef XEN_GUEST_HANDLE(tmem_op_t) tmem_cli_op_t;
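
/* Copy a tmem_op from the guest, translating from the 32-bit compat layout
 * when the caller is a 32-on-64 PV guest and CONFIG_COMPAT is in effect. */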
static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
{
#ifdef CONFIG_COMPAT
    if ( is_pv_32on64_vcpu(current) )
    {
        int rc;
        enum XLAT_tmem_op_u u;
        tmem_op_compat_t cop;

        rc = copy_from_guest(&cop, guest_handle_cast(uops, void), 1);
        if ( rc )
            return rc;
        switch ( cop.cmd )
        {
        case TMEM_NEW_POOL: u = XLAT_tmem_op_u_new;  break;
        case TMEM_CONTROL:  u = XLAT_tmem_op_u_ctrl; break;
        default:            u = XLAT_tmem_op_u_gen;  break;
        }
#define XLAT_tmem_op_HNDL_u_ctrl_buf(_d_, _s_) \
        guest_from_compat_handle((_d_)->u.ctrl.buf, (_s_)->u.ctrl.buf)
        XLAT_tmem_op(op, &cop);
#undef XLAT_tmem_op_HNDL_u_ctrl_buf
        return 0;
    }
#endif
    return copy_from_guest(op, uops, 1);
}

static inline void tmh_copy_to_client_buf_offset(tmem_cli_va_t clibuf, int off,
                                                 char *tmembuf, int len)
{
    copy_to_guest_offset(clibuf,off,tmembuf,len);
}

#define TMH_CLI_ID_NULL ((cli_id_t)((domid_t)-1L))

#define tmh_cli_id_str "domid"
#define tmh_client_str "domain"

extern int tmh_decompress_to_client(tmem_cli_mfn_t,void*,size_t);

extern int tmh_compress_from_client(tmem_cli_mfn_t,void**,size_t *);

extern int tmh_copy_from_client(pfp_t *pfp,
    tmem_cli_mfn_t cmfn, uint32_t tmem_offset,
    uint32_t pfn_offset, uint32_t len);

extern int tmh_copy_to_client(tmem_cli_mfn_t cmfn, pfp_t *pfp,
    uint32_t tmem_offset, uint32_t pfn_offset, uint32_t len);
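
/* Lightweight, advisory cycle counters for tmem performance accounting;
 * TMEM_PERF is defined just below, and the #else branch stubs most of
 * these out. */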
#define TMEM_PERF
#ifdef TMEM_PERF
#define DECL_CYC_COUNTER(x) \
    uint64_t x##_sum_cycles = 0, x##_count = 0; \
    uint32_t x##_min_cycles = 0x7fffffff, x##_max_cycles = 0;
#define EXTERN_CYC_COUNTER(x) \
    extern uint64_t x##_sum_cycles, x##_count; \
    extern uint32_t x##_min_cycles, x##_max_cycles;
#define DECL_LOCAL_CYC_COUNTER(x) \
    int64_t x##_start = 0
#define START_CYC_COUNTER(x) x##_start = get_cycles()
#define DUP_START_CYC_COUNTER(x,y) x##_start = y##_start
/* following might race, but since it's advisory only, don't care */
#define END_CYC_COUNTER(x) \
    do { \
      x##_start = get_cycles() - x##_start; \
      if (x##_start > 0 && x##_start < 1000000000) { \
        x##_sum_cycles += x##_start; x##_count++; \
        if ((uint32_t)x##_start < x##_min_cycles) x##_min_cycles = x##_start; \
        if ((uint32_t)x##_start > x##_max_cycles) x##_max_cycles = x##_start; \
      } \
    } while (0)
#define RESET_CYC_COUNTER(x) { x##_sum_cycles = 0, x##_count = 0; \
    x##_min_cycles = 0x7fffffff, x##_max_cycles = 0; }
#define SCNPRINTF_CYC_COUNTER(buf,size,x,tag) \
    scnprintf(buf,size, \
    tag"n:%"PRIu64","tag"t:%"PRIu64","tag"x:%"PRId32","tag"m:%"PRId32",", \
    x##_count,x##_sum_cycles,x##_max_cycles,x##_min_cycles)
#else
#define DECL_CYC_COUNTER(x)
#define EXTERN_CYC_COUNTER(x) \
    extern uint64_t x##_sum_cycles, x##_count; \
    extern uint32_t x##_min_cycles, x##_max_cycles;
#define DECL_LOCAL_CYC_COUNTER(x) do { } while (0)
#define START_CYC_COUNTER(x) do { } while (0)
#define DUP_START_CYC_COUNTER(x,y) do { } while (0)
#define END_CYC_COUNTER(x) do { } while (0)
#define SCNPRINTF_CYC_COUNTER(buf,size,x,tag) (0)
#define RESET_CYC_COUNTER(x) do { } while (0)
#endif

#endif /* __XEN_TMEM_XEN_H__ */
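
For orientation, a minimal sketch (not part of the header) of how a caller in
the tmem core might use the page helpers above, assuming this header is
included; the pool argument and the example function names are illustrative
only.

    /* Illustrative only: grab a free page for ephemeral use, falling back to
     * the domheap if the tmem free list is empty, and return it when done. */
    static struct page_info *example_get_ephemeral_page(void *pool)
    {
        struct page_info *pi = tmh_alloc_page(pool, 0 /* may fall back to heap */);

        if ( pi == NULL )
            return NULL;
        /* ... fill the page with ephemeral data ... */
        return pi;
    }

    static void example_put_ephemeral_page(struct page_info *pi)
    {
        tmh_free_page(pi);   /* returns the page to tmh_page_list */
    }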