ia64/xen-unstable

view xen/common/tmem_xen.c @ 19749:f649dd4454af

tmem: fix minor accounting error

Reset a counter when all tmem pages are released. This only
affects status reporting (as displayed by xm tmem-list or the
recently patched xenballoon-monitor), but the stale value that
would otherwise be reported is misleading.

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 16 11:18:32 2009 +0100 (2009-06-16)
parents f210a633571c
children d96cf4c974d5
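
The counter in question is tmh_page_list_pages. Judging from the description, the fix amounts to zeroing it whenever tmh_page_list is spliced onto Xen's scrub list in tmh_release_avail_pages_to_host() (shown in full in the listing below); the reset is presumably the line this patch adds:

    scrub_list_splice(&tmh_page_list);
    INIT_PAGE_LIST_HEAD(&tmh_page_list);
    tmh_page_list_pages = 0;  /* keep the count reported by xm tmem-list in sync */
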
line source
/******************************************************************************
 * tmem-xen.c
 *
 * Xen-specific Transcendent memory
 *
 * Copyright (c) 2009, Dan Magenheimer, Oracle Corp.
 */

#include <xen/tmem.h>
#include <xen/tmem_xen.h>
#include <xen/lzo.h> /* compression code */
#include <xen/paging.h>
#include <xen/domain_page.h>

#define EXPORT /* indicates code other modules are dependent upon */

EXPORT int opt_tmem = 0;
boolean_param("tmem", opt_tmem);

EXPORT int opt_tmem_compress = 0;
boolean_param("tmem_compress", opt_tmem_compress);

EXPORT int opt_tmem_lock = 0;
integer_param("tmem_lock", opt_tmem_lock);

#ifdef COMPARE_COPY_PAGE_SSE2
DECL_CYC_COUNTER(pg_copy1);
DECL_CYC_COUNTER(pg_copy2);
DECL_CYC_COUNTER(pg_copy3);
DECL_CYC_COUNTER(pg_copy4);
#else
DECL_CYC_COUNTER(pg_copy);
#endif

/* these are a concurrency bottleneck, could be percpu and dynamically
 * allocated iff opt_tmem_compress */
#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
#define LZO_DSTMEM_PAGES 2
static DEFINE_PER_CPU(unsigned char *, workmem);
static DEFINE_PER_CPU(unsigned char *, dstmem);

#ifdef COMPARE_COPY_PAGE_SSE2
#include <asm/flushtlb.h> /* REMOVE ME AFTER TEST */
#include <asm/page.h> /* REMOVE ME AFTER TEST */
#endif
void tmh_copy_page(char *to, char *from)
{
#ifdef COMPARE_COPY_PAGE_SSE2
    DECL_LOCAL_CYC_COUNTER(pg_copy1);
    DECL_LOCAL_CYC_COUNTER(pg_copy2);
    DECL_LOCAL_CYC_COUNTER(pg_copy3);
    DECL_LOCAL_CYC_COUNTER(pg_copy4);
    *to = *from;  /* don't measure TLB misses */
    flush_area_local(to,FLUSH_CACHE|FLUSH_ORDER(0));
    flush_area_local(from,FLUSH_CACHE|FLUSH_ORDER(0));
    START_CYC_COUNTER(pg_copy1);
    copy_page_sse2(to, from);  /* cold cache */
    END_CYC_COUNTER(pg_copy1);
    START_CYC_COUNTER(pg_copy2);
    copy_page_sse2(to, from);  /* hot cache */
    END_CYC_COUNTER(pg_copy2);
    flush_area_local(to,FLUSH_CACHE|FLUSH_ORDER(0));
    flush_area_local(from,FLUSH_CACHE|FLUSH_ORDER(0));
    START_CYC_COUNTER(pg_copy3);
    memcpy(to, from, PAGE_SIZE);  /* cold cache */
    END_CYC_COUNTER(pg_copy3);
    START_CYC_COUNTER(pg_copy4);
    memcpy(to, from, PAGE_SIZE);  /* hot cache */
    END_CYC_COUNTER(pg_copy4);
#else
    DECL_LOCAL_CYC_COUNTER(pg_copy);
    START_CYC_COUNTER(pg_copy);
    memcpy(to, from, PAGE_SIZE);
    END_CYC_COUNTER(pg_copy);
#endif
}

/* map a client-provided guest frame (cmfn) into Xen's address space; on x86
 * this returns NULL unless the frame is ordinary read-write RAM, and
 * optionally hands back the machine frame number via pcli_mfn */
#ifdef __ia64__
static inline void *cli_mfn_to_va(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn)
{
    ASSERT(0);
}
#define paging_mark_dirty(_x,_y) do {} while(0)
#else
static inline void *cli_mfn_to_va(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn)
{
    unsigned long cli_mfn;
    p2m_type_t t;

    if (is_pv_32on64_vcpu(current))
        cmfn.p = (void *)((unsigned long)cmfn.p & 0xffffffffUL);
    cli_mfn = mfn_x(gfn_to_mfn(current->domain,(unsigned long)cmfn.p,&t));
    if (t != p2m_ram_rw)
        return NULL;
    if (pcli_mfn != NULL)
        *pcli_mfn = cli_mfn;
    return map_domain_page(cli_mfn);
}
#endif

/* copy a page (or sub-page range) of client data into the tmem page pfp;
 * with no offsets and len == 0 the tmem page is simply zeroed */
EXPORT int tmh_copy_from_client(pfp_t *pfp,
    tmem_cli_mfn_t cmfn, uint32_t tmem_offset,
    uint32_t pfn_offset, uint32_t len)
{
    unsigned long tmem_mfn;
    void *tmem_va, *cli_va = NULL;

    ASSERT(pfp != NULL);
    if ( tmem_offset || pfn_offset || len )
        if ( (cli_va = cli_mfn_to_va(cmfn,NULL)) == NULL)
            return -EFAULT;
    tmem_mfn = page_to_mfn(pfp);
    tmem_va = map_domain_page(tmem_mfn);
    mb();
    if (!len && !tmem_offset && !pfn_offset)
        memset(tmem_va, 0, PAGE_SIZE);
    else if (len == PAGE_SIZE && !tmem_offset && !pfn_offset)
        tmh_copy_page(tmem_va, cli_va);
    else if ( (tmem_offset+len <= PAGE_SIZE) &&
              (pfn_offset+len <= PAGE_SIZE) )
        memcpy((char *)tmem_va+tmem_offset,(char *)cli_va+pfn_offset,len);
    unmap_domain_page(cli_va);
    unmap_domain_page(tmem_va);
    return 1;
}

/* compress the client's page into this cpu's dstmem buffer, handing the
 * buffer and compressed length back to the caller */
EXPORT int tmh_compress_from_client(tmem_cli_mfn_t cmfn,
    void **out_va, size_t *out_len)
{
    void *cli_va;
    int ret = 0;
    unsigned char *dmem = this_cpu(dstmem);
    unsigned char *wmem = this_cpu(workmem);

    if ( (cli_va = cli_mfn_to_va(cmfn,NULL)) == NULL)
        return -EFAULT;
    if ( dmem == NULL || wmem == NULL )
        return 0;  /* no buffer, so can't compress */
    mb();
    ret = lzo1x_1_compress(cli_va, PAGE_SIZE, dmem, out_len, wmem);
    ASSERT(ret == LZO_E_OK);
    *out_va = dmem;
    unmap_domain_page(cli_va);
    return 1;
}

/* copy data from the tmem page pfp back into the client's page and mark
 * that page dirty */
EXPORT int tmh_copy_to_client(tmem_cli_mfn_t cmfn, pfp_t *pfp,
    uint32_t tmem_offset, uint32_t pfn_offset, uint32_t len)
{
    unsigned long tmem_mfn, cli_mfn;
    void *tmem_va, *cli_va;

    ASSERT(pfp != NULL);
    if ( (cli_va = cli_mfn_to_va(cmfn,&cli_mfn)) == NULL)
        return -EFAULT;
    tmem_mfn = page_to_mfn(pfp);
    tmem_va = map_domain_page(tmem_mfn);
    if (len == PAGE_SIZE && !tmem_offset && !pfn_offset)
        tmh_copy_page(cli_va, tmem_va);
    else if ( (tmem_offset+len <= PAGE_SIZE) && (pfn_offset+len <= PAGE_SIZE) )
        memcpy((char *)cli_va+pfn_offset,(char *)tmem_va+tmem_offset,len);
    unmap_domain_page(tmem_va);
    unmap_domain_page(cli_va);
    paging_mark_dirty(current->domain,cli_mfn);
    mb();
    return 1;
}

/* decompress previously stored tmem data back into the client's page */
EXPORT int tmh_decompress_to_client(tmem_cli_mfn_t cmfn, void *tmem_va, size_t size)
{
    unsigned long cli_mfn;
    void *cli_va;
    size_t out_len = PAGE_SIZE;
    int ret;

    if ( (cli_va = cli_mfn_to_va(cmfn,&cli_mfn)) == NULL)
        return -EFAULT;
    ret = lzo1x_decompress_safe(tmem_va, size, cli_va, &out_len);
    ASSERT(ret == LZO_E_OK);
    ASSERT(out_len == PAGE_SIZE);
    unmap_domain_page(cli_va);
    paging_mark_dirty(current->domain,cli_mfn);
    mb();
    return 1;
}
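
For reference, the compress/decompress pair above round-trips a page through the per-cpu dstmem/workmem buffers set up in tmh_init(). A minimal sketch of that round trip, assuming a hypothetical helper (lzo_roundtrip_example is not part of this file) and already-mapped source and destination pages:

/* --- illustrative sketch, not part of tmem_xen.c --- */
static int lzo_roundtrip_example(void *src_va, void *dst_va)
{
    unsigned char *dmem = this_cpu(dstmem);   /* LZO_DSTMEM_PAGES pages */
    unsigned char *wmem = this_cpu(workmem);  /* LZO1X_1_MEM_COMPRESS bytes */
    size_t clen = 0, dlen = PAGE_SIZE;

    if ( dmem == NULL || wmem == NULL )
        return 0;  /* per-cpu buffers were never allocated */
    if ( lzo1x_1_compress(src_va, PAGE_SIZE, dmem, &clen, wmem) != LZO_E_OK )
        return 0;
    if ( lzo1x_decompress_safe(dmem, clen, dst_va, &dlen) != LZO_E_OK )
        return 0;
    return dlen == PAGE_SIZE;  /* a full page must come back out */
}
/* --- end sketch --- */
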
/******************  XEN-SPECIFIC MEMORY ALLOCATION ********************/

EXPORT struct xmem_pool *tmh_mempool = 0;
EXPORT unsigned int tmh_mempool_maxalloc = 0;

EXPORT DEFINE_SPINLOCK(tmh_page_list_lock);
EXPORT PAGE_LIST_HEAD(tmh_page_list);
EXPORT unsigned long tmh_page_list_pages = 0;

/* free anything on tmh_page_list to Xen's scrub list */
EXPORT void tmh_release_avail_pages_to_host(void)
{
    spin_lock(&tmh_page_list_lock);
    if ( !page_list_empty(&tmh_page_list) )
    {
        scrub_list_splice(&tmh_page_list);
        INIT_PAGE_LIST_HEAD(&tmh_page_list);
        tmh_page_list_pages = 0;
    }
    spin_unlock(&tmh_page_list_lock);
}

/* scrub a page being freed unless the allocation was flagged MEMF_tmem */
EXPORT void tmh_scrub_page(struct page_info *pi, unsigned int memflags)
{
    if ( pi == NULL )
        return;
    if ( !(memflags & MEMF_tmem) )
        scrub_one_page(pi);
}

#ifndef __i386__
static noinline void *tmh_mempool_page_get(unsigned long size)
{
    struct page_info *pi;

    ASSERT(size == PAGE_SIZE);
    if ( (pi = tmh_alloc_page(NULL,0)) == NULL )
        return NULL;
    ASSERT(IS_VALID_PAGE(pi));
    return page_to_virt(pi);
}

static void tmh_mempool_page_put(void *page_va)
{
    ASSERT(IS_PAGE_ALIGNED(page_va));
    tmh_free_page(virt_to_page(page_va));
}

static int tmh_mempool_init(void)
{
    tmh_mempool = xmem_pool_create("tmem", tmh_mempool_page_get,
        tmh_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
    if ( tmh_mempool )
        tmh_mempool_maxalloc = xmem_pool_maxalloc(tmh_mempool);
    return tmh_mempool != NULL;
}

/* persistent pools are per-domain */

static void *tmh_persistent_pool_page_get(unsigned long size)
{
    struct page_info *pi;
    struct domain *d = current->domain;

    ASSERT(size == PAGE_SIZE);
    if ( (pi = _tmh_alloc_page_thispool(d)) == NULL )
        return NULL;
    ASSERT(IS_VALID_PAGE(pi));
    return map_domain_page(page_to_mfn(pi));
}

static void tmh_persistent_pool_page_put(void *page_va)
{
    struct page_info *pi;

    ASSERT(IS_PAGE_ALIGNED(page_va));
    pi = virt_to_page(page_va);
    ASSERT(IS_VALID_PAGE(pi));
    _tmh_free_page_thispool(pi);
}
#endif

/******************  XEN-SPECIFIC CLIENT HANDLING ********************/

/* allocate and initialise per-client tmem state for the current domain */
EXPORT tmh_client_t *tmh_client_init(void)
{
    tmh_client_t *tmh;
    char name[5];
    domid_t domid = current->domain->domain_id;
    int i, shift;

    if ( (tmh = xmalloc(tmh_client_t)) == NULL )
        return NULL;
    /* derive a pool name from the domid, one nibble per character */
    for (i = 0, shift = 12; i < 4; shift -=4, i++)
        name[i] = ((unsigned short)domid >> shift) & 0xf;
    name[4] = '\0';
#ifndef __i386__
    tmh->persistent_pool = xmem_pool_create(name, tmh_persistent_pool_page_get,
        tmh_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
    if ( tmh->persistent_pool == NULL )
    {
        xfree(tmh);
        return NULL;
    }
#endif
    tmh->domain = current->domain;
    return tmh;
}

EXPORT void tmh_client_destroy(tmh_client_t *tmh)
{
#ifndef __i386__
    xmem_pool_destroy(tmh->persistent_pool);
#endif
    xfree(tmh);
}
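
The two routines above are the per-client setup/teardown hooks; their callers live in the generic tmem code rather than in this file. A hedged sketch of the expected pairing (the helper name example_client_lifecycle is hypothetical):

/* --- illustrative sketch, not part of tmem_xen.c --- */
static int example_client_lifecycle(void)
{
    tmh_client_t *tmh = tmh_client_init();  /* per-domain state + pool */

    if ( tmh == NULL )
        return -ENOMEM;
    /* ... persistent allocations for this client would go through
     *     tmh->persistent_pool while the client is live ... */
    tmh_client_destroy(tmh);  /* destroys the pool and frees the state */
    return 0;
}
/* --- end sketch --- */
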
/******************  XEN-SPECIFIC HOST INITIALIZATION ********************/

EXPORT int tmh_init(void)
{
#ifndef __i386__
    int dstmem_order, workmem_order;
    bool_t bad_alloc = 0;
    struct page_info *pi;
    unsigned char *p1, *p2;
    int cpu;

    if ( !tmh_mempool_init() )
        return 0;

    dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
    workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
    for_each_cpu ( cpu )
    {
        pi = alloc_domheap_pages(0,dstmem_order,0);
        per_cpu(dstmem, cpu) = p1 = ((pi == NULL) ? NULL : page_to_virt(pi));
        pi = alloc_domheap_pages(0,workmem_order,0);
        per_cpu(workmem, cpu) = p2 = ((pi == NULL) ? NULL : page_to_virt(pi));
        if ( (p1 == NULL) || (p2 == NULL) )
            bad_alloc++;
    }
    if ( bad_alloc )
        printk("tmem: can't allocate compression buffers for %d cpus\n",
               bad_alloc);
#endif
    return 1;
}