ia64/xen-unstable
tools/ioemu/hw/xen_machine_fv.c @ 16337:1d0a2cb9f383

[IA64] vti save-restore: ia64 qemu-dm boot clean up.

Use xc_get_hvm_param().

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>

author    Alex Williamson <alex.williamson@hp.com>
date      Wed Nov 07 10:41:43 2007 -0700
parents   dfe9c0c10a2c
children  a1247c2df2b4
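
A minimal sketch of the xc_get_hvm_param() usage this changeset refers to, as it appears in xen_init_fv() below (names exactly as used in this file):

    unsigned long ioreq_pfn;

    /* Ask Xen which guest pfn holds the ioreq shared page, then map it. */
    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                       PROT_READ|PROT_WRITE, ioreq_pfn);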
/*
 * QEMU Xen FV Machine
 *
 * Copyright (c) 2003-2007 Fabrice Bellard
 * Copyright (c) 2007 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "vl.h"
#include <xen/hvm/params.h>
#include <sys/mman.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE XC_PAGE_SIZE
#endif
#ifndef PAGE_SHIFT
#define PAGE_SHIFT XC_PAGE_SHIFT
#endif

#if defined(MAPCACHE)

#if defined(__i386__)
#define MAX_MCACHE_SIZE     0x40000000 /* 1GB max for x86 */
#define MCACHE_BUCKET_SHIFT 16
#elif defined(__x86_64__)
#define MAX_MCACHE_SIZE     0x1000000000 /* 64GB max for x86_64 */
#define MCACHE_BUCKET_SHIFT 20
#endif

#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)

#define BITS_PER_LONG (sizeof(long)*8)
#define BITS_TO_LONGS(bits) \
    (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
#define DECLARE_BITMAP(name,bits) \
    unsigned long name[BITS_TO_LONGS(bits)]
#define test_bit(bit,map) \
    (!!((map)[(bit)/BITS_PER_LONG] & (1UL << ((bit)%BITS_PER_LONG))))
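
/*
 * Direct-mapped cache of guest-physical-to-virtual mappings.  Each bucket
 * covers MCACHE_BUCKET_SIZE bytes of guest physical address space;
 * valid_mapping records, per page, whether the batch mapping of that frame
 * actually succeeded.
 */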
struct map_cache {
    unsigned long paddr_index;
    uint8_t *vaddr_base;
    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>PAGE_SHIFT);
};

static struct map_cache *mapcache_entry;
static unsigned long nr_buckets;

/* For most cases (>99.9%), the page address is the same. */
static unsigned long last_address_index = ~0UL;
static uint8_t *last_address_vaddr;

static int qemu_map_cache_init(void)
{
    unsigned long size;

    nr_buckets = (((MAX_MCACHE_SIZE >> PAGE_SHIFT) +
                   (1UL << (MCACHE_BUCKET_SHIFT - PAGE_SHIFT)) - 1) >>
                  (MCACHE_BUCKET_SHIFT - PAGE_SHIFT));

    /*
     * Use mmap() directly: lets us allocate a big hash table with no up-front
     * cost in storage space. The OS will allocate memory only for the buckets
     * that we actually use. All others will contain all zeroes.
     */
    size = nr_buckets * sizeof(struct map_cache);
    size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    fprintf(logfile, "qemu_map_cache_init nr_buckets = %lx size %lu\n", nr_buckets, size);
    mapcache_entry = mmap(NULL, size, PROT_READ|PROT_WRITE,
                          MAP_SHARED|MAP_ANON, -1, 0);
    if (mapcache_entry == MAP_FAILED) {
        errno = ENOMEM;
        return -1;
    }

    return 0;
}
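
/*
 * (Re)populate one bucket: unmap any previous contents, batch-map the guest
 * frames backing this bucket, and record in valid_mapping which pages were
 * successfully mapped (a frame is treated as invalid when bits 31:28 of the
 * entry returned by xc_map_foreign_batch() are all set).
 */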
static void qemu_remap_bucket(struct map_cache *entry,
                              unsigned long address_index)
{
    uint8_t *vaddr_base;
    unsigned long pfns[MCACHE_BUCKET_SIZE >> PAGE_SHIFT];
    unsigned int i, j;

    if (entry->vaddr_base != NULL) {
        errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
        if (errno) {
            fprintf(logfile, "unmap fails %d\n", errno);
            exit(-1);
        }
    }

    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i++)
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-PAGE_SHIFT)) + i;

    vaddr_base = xc_map_foreign_batch(xc_handle, domid, PROT_READ|PROT_WRITE,
                                      pfns, MCACHE_BUCKET_SIZE >> PAGE_SHIFT);
    if (vaddr_base == NULL) {
        fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;

    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i += BITS_PER_LONG) {
        unsigned long word = 0;
        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> PAGE_SHIFT)) ?
            (MCACHE_BUCKET_SIZE >> PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
        while (j > 0)
            word = (word << 1) | (((pfns[i + --j] >> 28) & 0xf) != 0xf);
        entry->valid_mapping[i / BITS_PER_LONG] = word;
    }
}
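
/*
 * Translate a guest physical address into a pointer within qemu's address
 * space.  The common case hits the single-entry last_address cache; otherwise
 * the direct-mapped bucket is looked up and remapped on a miss.  Returns NULL
 * if the page could not be mapped.
 */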
uint8_t *qemu_map_cache(target_phys_addr_t phys_addr)
{
    struct map_cache *entry;
    unsigned long address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
    unsigned long address_offset = phys_addr & (MCACHE_BUCKET_SIZE-1);

    if (address_index == last_address_index)
        return last_address_vaddr + address_offset;

    entry = &mapcache_entry[address_index % nr_buckets];

    if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
        !test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
        qemu_remap_bucket(entry, address_index);

    if (!test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
        return NULL;

    last_address_index = address_index;
    last_address_vaddr = entry->vaddr_base;

    return last_address_vaddr + address_offset;
}
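
/*
 * Drop every cached mapping and reset the last-address fast path; subsequent
 * accesses will remap their buckets from scratch.
 */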
void qemu_invalidate_map_cache(void)
{
    unsigned long i;

    mapcache_lock();

    for (i = 0; i < nr_buckets; i++) {
        struct map_cache *entry = &mapcache_entry[i];

        if (entry->vaddr_base == NULL)
            continue;

        errno = munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE);
        if (errno) {
            fprintf(logfile, "unmap fails %d\n", errno);
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
    }

    last_address_index = ~0UL;
    last_address_vaddr = NULL;

    mapcache_unlock();
}

#endif /* defined(MAPCACHE) */
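
/*
 * Machine init for the Xen fully-virtualized target: set up the map cache on
 * x86/x86_64, map the ioreq shared page and buffered I/O page(s) at the pfns
 * reported by xc_get_hvm_param(), map all of guest RAM on ia64, then hand off
 * to the standard PC machine init.
 */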
static void xen_init_fv(uint64_t ram_size, int vga_ram_size, char *boot_device,
                        DisplayState *ds, const char **fd_filename,
                        int snapshot,
                        const char *kernel_filename,
                        const char *kernel_cmdline,
                        const char *initrd_filename,
                        const char *direct_pci)
{
    unsigned long ioreq_pfn;
    extern void *shared_page;
    extern void *buffered_io_page;
#ifdef __ia64__
    unsigned long nr_pages;
    xen_pfn_t *page_array;
    extern void *buffered_pio_page;
    int i;
#endif

#if defined(__i386__) || defined(__x86_64__)

    if (qemu_map_cache_init()) {
        fprintf(logfile, "qemu_map_cache_init returned: error %d\n", errno);
        exit(-1);
    }

    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                       PROT_READ|PROT_WRITE, ioreq_pfn);
    if (shared_page == NULL) {
        fprintf(logfile, "map shared IO page returned error %d\n", errno);
        exit(-1);
    }

    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                            PROT_READ|PROT_WRITE, ioreq_pfn);
    if (buffered_io_page == NULL) {
        fprintf(logfile, "map buffered IO page returned error %d\n", errno);
        exit(-1);
    }

#elif defined(__ia64__)

    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                       PROT_READ|PROT_WRITE, ioreq_pfn);
    if (shared_page == NULL) {
        fprintf(logfile, "map shared IO page returned error %d\n", errno);
        exit(-1);
    }

    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                            PROT_READ|PROT_WRITE, ioreq_pfn);
    if (buffered_io_page == NULL) {
        fprintf(logfile, "map buffered IO page returned error %d\n", errno);
        exit(-1);
    }

    xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFPIOREQ_PFN, &ioreq_pfn);
    fprintf(logfile, "buffered pio page at pfn %lx\n", ioreq_pfn);
    buffered_pio_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                             PROT_READ|PROT_WRITE, ioreq_pfn);
    if (buffered_pio_page == NULL) {
        fprintf(logfile, "map buffered PIO page returned error %d\n", errno);
        exit(-1);
    }

    nr_pages = ram_size / PAGE_SIZE;

    page_array = (xen_pfn_t *)malloc(nr_pages * sizeof(xen_pfn_t));
    if (page_array == NULL) {
        fprintf(logfile, "malloc returned error %d\n", errno);
        exit(-1);
    }

    for (i = 0; i < nr_pages; i++)
        page_array[i] = i;

    /* VTI will not use memory between 3G~4G, so we just pass a legal pfn
       to make QEMU map continuous virtual memory space */
    if (ram_size > MMIO_START) {
        for (i = 0 ; i < (MEM_G >> PAGE_SHIFT); i++)
            page_array[(MMIO_START >> PAGE_SHIFT) + i] =
                (STORE_PAGE_START >> PAGE_SHIFT);
    }

    phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
                                         PROT_READ|PROT_WRITE,
                                         page_array, nr_pages);
    if (phys_ram_base == 0) {
        fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
        exit(-1);
    }
    free(page_array);
#endif

    timeoffset_get();

    pc_machine.init(ram_size, vga_ram_size, boot_device, ds, fd_filename,
                    snapshot, kernel_filename, kernel_cmdline, initrd_filename,
                    direct_pci);
}

QEMUMachine xenfv_machine = {
    "xenfv",
    "Xen Fully-virtualized PC",
    xen_init_fv,
};

/*
 * Local variables:
 *  indent-tabs-mode: nil
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */