ia64/xen-unstable

view xen/common/tmem_xen.c @ 19835:edfdeb150f27

Fix buildsystem to detect udev > version 124

udev removed the udevinfo symlink from versions higher than 123, and
Xen's build system could not detect whether udev is in place and has
the required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100 (2009-06-25)
parents d96cf4c974d5
children
line source
/******************************************************************************
 * tmem-xen.c
 *
 * Xen-specific Transcendent memory
 *
 * Copyright (c) 2009, Dan Magenheimer, Oracle Corp.
 */

#include <xen/tmem.h>
#include <xen/tmem_xen.h>
#include <xen/lzo.h> /* compression code */
#include <xen/paging.h>
#include <xen/domain_page.h>

#define EXPORT /* indicates code other modules are dependent upon */

EXPORT int opt_tmem = 0;
boolean_param("tmem", opt_tmem);

EXPORT int opt_tmem_compress = 0;
boolean_param("tmem_compress", opt_tmem_compress);

EXPORT int opt_tmem_lock = 0;
integer_param("tmem_lock", opt_tmem_lock);

#ifdef COMPARE_COPY_PAGE_SSE2
DECL_CYC_COUNTER(pg_copy1);
DECL_CYC_COUNTER(pg_copy2);
DECL_CYC_COUNTER(pg_copy3);
DECL_CYC_COUNTER(pg_copy4);
#else
DECL_CYC_COUNTER(pg_copy);
#endif

/* these are a concurrency bottleneck, could be percpu and dynamically
 * allocated iff opt_tmem_compress */
#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
#define LZO_DSTMEM_PAGES 2
static DEFINE_PER_CPU(unsigned char *, workmem);
static DEFINE_PER_CPU(unsigned char *, dstmem);

#ifdef COMPARE_COPY_PAGE_SSE2
#include <asm/flushtlb.h> /* REMOVE ME AFTER TEST */
#include <asm/page.h> /* REMOVE ME AFTER TEST */
#endif
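/*
 * Copy one page of tmem data.  Normally a plain memcpy() wrapped in a cycle
 * counter; when COMPARE_COPY_PAGE_SSE2 is defined it instead benchmarks an
 * SSE2-based copy against memcpy() with both cold and hot caches.
 */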
void tmh_copy_page(char *to, char *from)
{
#ifdef COMPARE_COPY_PAGE_SSE2
    DECL_LOCAL_CYC_COUNTER(pg_copy1);
    DECL_LOCAL_CYC_COUNTER(pg_copy2);
    DECL_LOCAL_CYC_COUNTER(pg_copy3);
    DECL_LOCAL_CYC_COUNTER(pg_copy4);
    *to = *from; /* don't measure TLB misses */
    flush_area_local(to,FLUSH_CACHE|FLUSH_ORDER(0));
    flush_area_local(from,FLUSH_CACHE|FLUSH_ORDER(0));
    START_CYC_COUNTER(pg_copy1);
    copy_page_sse2(to, from); /* cold cache */
    END_CYC_COUNTER(pg_copy1);
    START_CYC_COUNTER(pg_copy2);
    copy_page_sse2(to, from); /* hot cache */
    END_CYC_COUNTER(pg_copy2);
    flush_area_local(to,FLUSH_CACHE|FLUSH_ORDER(0));
    flush_area_local(from,FLUSH_CACHE|FLUSH_ORDER(0));
    START_CYC_COUNTER(pg_copy3);
    memcpy(to, from, PAGE_SIZE); /* cold cache */
    END_CYC_COUNTER(pg_copy3);
    START_CYC_COUNTER(pg_copy4);
    memcpy(to, from, PAGE_SIZE); /* hot cache */
    END_CYC_COUNTER(pg_copy4);
#else
    DECL_LOCAL_CYC_COUNTER(pg_copy);
    START_CYC_COUNTER(pg_copy);
    memcpy(to, from, PAGE_SIZE);
    END_CYC_COUNTER(pg_copy);
#endif
}

#ifdef __ia64__
static inline void *cli_mfn_to_va(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn)
{
    ASSERT(0);
}
#define paging_mark_dirty(_x,_y) do {} while(0)
#else
static inline void *cli_mfn_to_va(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn)
{
    unsigned long cli_mfn;
    p2m_type_t t;

    cli_mfn = mfn_x(gfn_to_mfn(current->domain, cmfn, &t));
    if (t != p2m_ram_rw)
        return NULL;
    if (pcli_mfn != NULL)
        *pcli_mfn = cli_mfn;
    return map_domain_page(cli_mfn);
}
#endif

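/*
 * Copy (part of) a client page into the tmem page 'pfp'.  With tmem_offset,
 * pfn_offset and len all zero the destination page is simply zeroed; a
 * full-page request goes through tmh_copy_page(); otherwise a bounds-checked
 * partial memcpy() is done.
 */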
EXPORT int tmh_copy_from_client(pfp_t *pfp,
    tmem_cli_mfn_t cmfn, uint32_t tmem_offset,
    uint32_t pfn_offset, uint32_t len)
{
    unsigned long tmem_mfn;
    void *tmem_va, *cli_va = NULL;

    ASSERT(pfp != NULL);
    if ( tmem_offset || pfn_offset || len )
        if ( (cli_va = cli_mfn_to_va(cmfn,NULL)) == NULL)
            return -EFAULT;
    tmem_mfn = page_to_mfn(pfp);
    tmem_va = map_domain_page(tmem_mfn);
    mb();
    if (!len && !tmem_offset && !pfn_offset)
        memset(tmem_va, 0, PAGE_SIZE);
    else if (len == PAGE_SIZE && !tmem_offset && !pfn_offset)
        tmh_copy_page(tmem_va, cli_va);
    else if ( (tmem_offset+len <= PAGE_SIZE) &&
              (pfn_offset+len <= PAGE_SIZE) )
        memcpy((char *)tmem_va+tmem_offset,(char *)cli_va+pfn_offset,len);
    unmap_domain_page(cli_va);
    unmap_domain_page(tmem_va);
    return 1;
}

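/*
 * LZO1X-compress a client page into this CPU's per-cpu dstmem buffer.
 * Returns 1 on success with *out_va/*out_len set, 0 if the per-cpu buffers
 * are unavailable, or -EFAULT if the client frame cannot be mapped.
 */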
EXPORT int tmh_compress_from_client(tmem_cli_mfn_t cmfn,
    void **out_va, size_t *out_len)
{
    void *cli_va;
    int ret = 0;
    unsigned char *dmem = this_cpu(dstmem);
    unsigned char *wmem = this_cpu(workmem);

    if ( (cli_va = cli_mfn_to_va(cmfn,NULL)) == NULL)
        return -EFAULT;
    if ( dmem == NULL || wmem == NULL )
        return 0; /* no buffer, so can't compress */
    mb();
    ret = lzo1x_1_compress(cli_va, PAGE_SIZE, dmem, out_len, wmem);
    ASSERT(ret == LZO_E_OK);
    *out_va = dmem;
    unmap_domain_page(cli_va);
    return 1;
}

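/*
 * Copy (part of) the tmem page 'pfp' back into a client page, then mark
 * the client frame dirty so that log-dirty tracking sees the update.
 */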
EXPORT int tmh_copy_to_client(tmem_cli_mfn_t cmfn, pfp_t *pfp,
    uint32_t tmem_offset, uint32_t pfn_offset, uint32_t len)
{
    unsigned long tmem_mfn, cli_mfn;
    void *tmem_va, *cli_va;

    ASSERT(pfp != NULL);
    if ( (cli_va = cli_mfn_to_va(cmfn,&cli_mfn)) == NULL)
        return -EFAULT;
    tmem_mfn = page_to_mfn(pfp);
    tmem_va = map_domain_page(tmem_mfn);
    if (len == PAGE_SIZE && !tmem_offset && !pfn_offset)
        tmh_copy_page(cli_va, tmem_va);
    else if ( (tmem_offset+len <= PAGE_SIZE) && (pfn_offset+len <= PAGE_SIZE) )
        memcpy((char *)cli_va+pfn_offset,(char *)tmem_va+tmem_offset,len);
    unmap_domain_page(tmem_va);
    unmap_domain_page(cli_va);
    paging_mark_dirty(current->domain,cli_mfn);
    mb();
    return 1;
}

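/*
 * Decompress an LZO1X-compressed tmem page directly into the client's
 * frame; the decompressed size must come out to exactly PAGE_SIZE.
 */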
EXPORT int tmh_decompress_to_client(tmem_cli_mfn_t cmfn, void *tmem_va, size_t size)
{
    unsigned long cli_mfn;
    void *cli_va;
    size_t out_len = PAGE_SIZE;
    int ret;

    if ( (cli_va = cli_mfn_to_va(cmfn,&cli_mfn)) == NULL)
        return -EFAULT;
    ret = lzo1x_decompress_safe(tmem_va, size, cli_va, &out_len);
    ASSERT(ret == LZO_E_OK);
    ASSERT(out_len == PAGE_SIZE);
    unmap_domain_page(cli_va);
    paging_mark_dirty(current->domain,cli_mfn);
    mb();
    return 1;
}

/****************** XEN-SPECIFIC MEMORY ALLOCATION ********************/

EXPORT struct xmem_pool *tmh_mempool = 0;
EXPORT unsigned int tmh_mempool_maxalloc = 0;

EXPORT DEFINE_SPINLOCK(tmh_page_list_lock);
EXPORT PAGE_LIST_HEAD(tmh_page_list);
EXPORT unsigned long tmh_page_list_pages = 0;

/* free anything on tmh_page_list to Xen's scrub list */
EXPORT void tmh_release_avail_pages_to_host(void)
{
    spin_lock(&tmh_page_list_lock);
    if ( !page_list_empty(&tmh_page_list) )
    {
        scrub_list_splice(&tmh_page_list);
        INIT_PAGE_LIST_HEAD(&tmh_page_list);
        tmh_page_list_pages = 0;
    }
    spin_unlock(&tmh_page_list_lock);
}

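/* Scrub a page unless the caller passed MEMF_tmem to skip scrubbing. */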
EXPORT void tmh_scrub_page(struct page_info *pi, unsigned int memflags)
{
    if ( pi == NULL )
        return;
    if ( !(memflags & MEMF_tmem) )
        scrub_one_page(pi);
}

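/*
 * Backing-page callbacks for the xmem_pool allocators.  Not built on
 * x86_32 (__i386__), presumably because page_to_virt() requires the page
 * to sit in the hypervisor's direct map.
 */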
#ifndef __i386__
static noinline void *tmh_mempool_page_get(unsigned long size)
{
    struct page_info *pi;

    ASSERT(size == PAGE_SIZE);
    if ( (pi = tmh_alloc_page(NULL,0)) == NULL )
        return NULL;
    ASSERT(IS_VALID_PAGE(pi));
    return page_to_virt(pi);
}

static void tmh_mempool_page_put(void *page_va)
{
    ASSERT(IS_PAGE_ALIGNED(page_va));
    tmh_free_page(virt_to_page(page_va));
}

static int tmh_mempool_init(void)
{
    tmh_mempool = xmem_pool_create("tmem", tmh_mempool_page_get,
        tmh_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
    if ( tmh_mempool )
        tmh_mempool_maxalloc = xmem_pool_maxalloc(tmh_mempool);
    return tmh_mempool != NULL;
}

/* persistent pools are per-domain */

static void *tmh_persistent_pool_page_get(unsigned long size)
{
    struct page_info *pi;
    struct domain *d = current->domain;

    ASSERT(size == PAGE_SIZE);
    if ( (pi = _tmh_alloc_page_thispool(d)) == NULL )
        return NULL;
    ASSERT(IS_VALID_PAGE(pi));
    return map_domain_page(page_to_mfn(pi));
}

static void tmh_persistent_pool_page_put(void *page_va)
{
    struct page_info *pi;

    ASSERT(IS_PAGE_ALIGNED(page_va));
    pi = virt_to_page(page_va);
    ASSERT(IS_VALID_PAGE(pi));
    _tmh_free_page_thispool(pi);
}
#endif

/****************** XEN-SPECIFIC CLIENT HANDLING ********************/

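/*
 * Allocate and initialise the per-client (per-domain) tmem state, including
 * a persistent xmem_pool whose 4-character name is built from the nibbles
 * of the client's domid.
 */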
EXPORT tmh_client_t *tmh_client_init(void)
{
    tmh_client_t *tmh;
    char name[5];
    domid_t domid = current->domain->domain_id;
    int i, shift;

    if ( (tmh = xmalloc(tmh_client_t)) == NULL )
        return NULL;
    for (i = 0, shift = 12; i < 4; shift -=4, i++)
        name[i] = ((unsigned short)domid >> shift) & 0xf;
    name[4] = '\0';
#ifndef __i386__
    tmh->persistent_pool = xmem_pool_create(name, tmh_persistent_pool_page_get,
        tmh_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
    if ( tmh->persistent_pool == NULL )
    {
        xfree(tmh);
        return NULL;
    }
#endif
    tmh->domain = current->domain;
    return tmh;
}

EXPORT void tmh_client_destroy(tmh_client_t *tmh)
{
#ifndef __i386__
    xmem_pool_destroy(tmh->persistent_pool);
#endif
    xfree(tmh);
}

/****************** XEN-SPECIFIC HOST INITIALIZATION ********************/

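/*
 * One-time host initialisation: set up the shared xmem_pool and allocate
 * the per-cpu dstmem/workmem compression buffers for each CPU.
 */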
EXPORT int tmh_init(void)
{
#ifndef __i386__
    int dstmem_order, workmem_order;
    bool_t bad_alloc = 0;
    struct page_info *pi;
    unsigned char *p1, *p2;
    int cpu;

    if ( !tmh_mempool_init() )
        return 0;

    dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
    workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
    for_each_cpu ( cpu )
    {
        pi = alloc_domheap_pages(0,dstmem_order,0);
        per_cpu(dstmem, cpu) = p1 = ((pi == NULL) ? NULL : page_to_virt(pi));
        pi = alloc_domheap_pages(0,workmem_order,0);
        per_cpu(workmem, cpu) = p2 = ((pi == NULL) ? NULL : page_to_virt(pi));
        if ( (p1 == NULL) || (p2 == NULL) )
            bad_alloc++;
    }
    if ( bad_alloc )
        printk("tmem: can't allocate compression buffers for %d cpus\n",
               bad_alloc);
#endif
    return 1;
}