--- /dev/null
+/*
+ * virtual page mapping and translated block handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "config.h"
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/types.h>
+#include <sys/mman.h>
+#endif
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "vl.h"
+
+//#define DEBUG_TB_INVALIDATE
+//#define DEBUG_FLUSH
+//#define DEBUG_TLB
+
+/* make various TB consistency checks */
+//#define DEBUG_TB_CHECK
+//#define DEBUG_TLB_CHECK
+
+#ifndef CONFIG_DM
+/* threshold to flush the translated code buffer */
+#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
+
+#define SMC_BITMAP_USE_THRESHOLD 10
+
+#define MMAP_AREA_START 0x00000000
+#define MMAP_AREA_END 0xa8000000
+
+TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
+TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
+TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
+int nb_tbs;
+/* any access to the tbs or the page table must use this lock */
+spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
+
+uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
+uint8_t *code_gen_ptr;
+#endif /* !CONFIG_DM */
+
+uint64_t phys_ram_size;
+extern uint64_t ram_size;
+int phys_ram_fd;
+uint8_t *phys_ram_base;
+uint8_t *phys_ram_dirty;
+
+CPUState *first_cpu;
+/* current CPU in the current thread. It is only valid inside
+ cpu_exec() */
+CPUState *cpu_single_env;
+
+typedef struct PageDesc {
+ /* list of TBs intersecting this ram page */
+ TranslationBlock *first_tb;
+ /* to optimize handling of self-modifying code, we count the writes
+ to a given page; past a threshold a bitmap is used instead */
+ unsigned int code_write_count;
+ uint8_t *code_bitmap;
+#if defined(CONFIG_USER_ONLY)
+ unsigned long flags;
+#endif
+} PageDesc;
+
+typedef struct PhysPageDesc {
+ /* offset in host memory of the page + io_index in the low 12 bits */
+ unsigned long phys_offset;
+} PhysPageDesc;
+
+typedef struct VirtPageDesc {
+ /* physical address of code page. It is valid only if 'valid_tag'
+ matches 'virt_valid_tag' */
+ target_ulong phys_addr;
+ unsigned int valid_tag;
+#if !defined(CONFIG_SOFTMMU)
+ /* original page access rights. It is valid only if 'valid_tag'
+ matches 'virt_valid_tag' */
+ unsigned int prot;
+#endif
+} VirtPageDesc;
+
+#define L2_BITS 10
+#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
+
+#define L1_SIZE (1 << L1_BITS)
+#define L2_SIZE (1 << L2_BITS)
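+
+/* Two-level lookup: assuming TARGET_PAGE_BITS == 12 (4 KB target pages),
+ * a 32-bit address splits into a 10-bit L1 index, a 10-bit L2 index and
+ * a 12-bit page offset:
+ *
+ *   l1 = addr >> (L2_BITS + TARGET_PAGE_BITS);
+ *   l2 = (addr >> TARGET_PAGE_BITS) & (L2_SIZE - 1);
+ */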
+
+unsigned long qemu_real_host_page_size;
+unsigned long qemu_host_page_bits;
+unsigned long qemu_host_page_size;
+unsigned long qemu_host_page_mask;
+
+/* io memory support */
+CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
+CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
+void *io_mem_opaque[IO_MEM_NB_ENTRIES];
+static int io_mem_nb = 1;
+
+/* log support */
+FILE *logfile;
+int loglevel;
+
+void cpu_exec_init(CPUState *env)
+{
+ CPUState **penv;
+ int cpu_index;
+
+ env->next_cpu = NULL;
+ penv = &first_cpu;
+ cpu_index = 0;
+ while (*penv != NULL) {
+ penv = (CPUState **)&(*penv)->next_cpu;
+ cpu_index++;
+ }
+ env->cpu_index = cpu_index;
+ *penv = env;
+
+ /* alloc dirty bits array */
+ phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
+}
+
+/* enable or disable low level logging */
+void cpu_set_log(int log_flags)
+{
+ loglevel = log_flags;
+ if (!logfile)
+ logfile = stderr;
+}
+
+void cpu_set_log_filename(const char *filename)
+{
+ logfile = fopen(filename, "w");
+ if (!logfile) {
+ perror(filename);
+ _exit(1);
+ }
+#if !defined(CONFIG_SOFTMMU)
+ /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
+ {
+ static uint8_t logfile_buf[4096];
+ setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
+ }
+#else
+ setvbuf(logfile, NULL, _IOLBF, 0);
+#endif
+ dup2(fileno(logfile), 1);
+ dup2(fileno(logfile), 2);
+}
+
+/* mask must never be zero, except for A20 change call */
+void cpu_interrupt(CPUState *env, int mask)
+{
+ env->interrupt_request |= mask;
+}
+
+void cpu_reset_interrupt(CPUState *env, int mask)
+{
+ env->interrupt_request &= ~mask;
+}
+
+CPULogItem cpu_log_items[] = {
+ { CPU_LOG_TB_OUT_ASM, "out_asm",
+ "show generated host assembly code for each compiled TB" },
+ { CPU_LOG_TB_IN_ASM, "in_asm",
+ "show target assembly code for each compiled TB" },
+ { CPU_LOG_TB_OP, "op",
+ "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
+#ifdef TARGET_I386
+ { CPU_LOG_TB_OP_OPT, "op_opt",
+ "show micro ops after optimization for each compiled TB" },
+#endif
+ { CPU_LOG_INT, "int",
+ "show interrupts/exceptions in short format" },
+ { CPU_LOG_EXEC, "exec",
+ "show trace before each executed TB (lots of logs)" },
+ { CPU_LOG_TB_CPU, "cpu",
+ "show CPU state before bloc translation" },
+#ifdef TARGET_I386
+ { CPU_LOG_PCALL, "pcall",
+ "show protected mode far calls/returns/exceptions" },
+#endif
+#ifdef DEBUG_IOPORT
+ { CPU_LOG_IOPORT, "ioport",
+ "show all i/o ports accesses" },
+#endif
+ { 0, NULL, NULL },
+};
+
+static int cmp1(const char *s1, int n, const char *s2)
+{
+ if (strlen(s2) != n)
+ return 0;
+ return memcmp(s1, s2, n) == 0;
+}
+
+/* takes a comma separated list of log masks. Return 0 if error. */
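+/* e.g. cpu_str_to_log_mask("int,exec") == (CPU_LOG_INT | CPU_LOG_EXEC);
+   "all" selects every mask */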
+int cpu_str_to_log_mask(const char *str)
+{
+ CPULogItem *item;
+ int mask;
+ const char *p, *p1;
+
+ p = str;
+ mask = 0;
+ for(;;) {
+ p1 = strchr(p, ',');
+ if (!p1)
+ p1 = p + strlen(p);
+ if (cmp1(p, p1 - p, "all")) {
+ for(item = cpu_log_items; item->mask != 0; item++) {
+ mask |= item->mask;
+ }
+ } else {
+ for(item = cpu_log_items; item->mask != 0; item++) {
+ if (cmp1(p, p1 - p, item->name))
+ goto found;
+ }
+ return 0;
+ }
+ found:
+ mask |= item->mask;
+ if (*p1 != ',')
+ break;
+ p = p1 + 1;
+ }
+ return mask;
+}
+
+void cpu_abort(CPUState *env, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ fprintf(stderr, "qemu: fatal: ");
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
+ abort();
+}
+
+
+/* XXX: Simple implementation. Fix later */
+#define MAX_MMIO 32
+struct mmio_space {
+ target_phys_addr_t start;
+ unsigned long size;
+ unsigned long io_index;
+} mmio[MAX_MMIO];
+unsigned long mmio_cnt;
+
+/* register physical memory. 'size' must be a multiple of the target
+ page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
+ io memory page */
+void cpu_register_physical_memory(target_phys_addr_t start_addr,
+ unsigned long size,
+ unsigned long phys_offset)
+{
+ int i;
+
+ for (i = 0; i < mmio_cnt; i++) {
+ if(mmio[i].start == start_addr) {
+ mmio[i].io_index = phys_offset;
+ mmio[i].size = size;
+ return;
+ }
+ }
+
+ if (mmio_cnt == MAX_MMIO) {
+ fprintf(logfile, "too many mmio regions\n");
+ exit(-1);
+ }
+
+ mmio[mmio_cnt].io_index = phys_offset;
+ mmio[mmio_cnt].start = start_addr;
+ mmio[mmio_cnt++].size = size;
+}
+
+/* mem_read and mem_write are arrays of functions containing the
+ function to access byte (index 0), word (index 1) and dword (index
+ 2). All functions must be supplied. If io_index is non zero, the
+ corresponding io zone is modified. If it is zero, a new io zone is
+ allocated. The return value can be used with
+ cpu_register_physical_memory(). (-1) is returned if error. */
+int cpu_register_io_memory(int io_index,
+ CPUReadMemoryFunc **mem_read,
+ CPUWriteMemoryFunc **mem_write,
+ void *opaque)
+{
+ int i;
+
+ if (io_index <= 0) {
+ if (io_mem_nb >= IO_MEM_NB_ENTRIES)
+ return -1;
+ io_index = io_mem_nb++;
+ } else {
+ if (io_index >= IO_MEM_NB_ENTRIES)
+ return -1;
+ }
+
+ for (i = 0; i < 3; i++) {
+ io_mem_read[io_index][i] = mem_read[i];
+ io_mem_write[io_index][i] = mem_write[i];
+ }
+ io_mem_opaque[io_index] = opaque;
+ return io_index << IO_MEM_SHIFT;
+}
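+
+/* Typical usage (a sketch; foo_readb/w/l and foo_writeb/w/l are
+ * hypothetical handlers):
+ *
+ *   static CPUReadMemoryFunc *foo_read[3] = {
+ *       foo_readb, foo_readw, foo_readl,
+ *   };
+ *   static CPUWriteMemoryFunc *foo_write[3] = {
+ *       foo_writeb, foo_writew, foo_writel,
+ *   };
+ *   int io = cpu_register_io_memory(0, foo_read, foo_write, opaque);
+ *   cpu_register_physical_memory(base_addr, region_size, io);
+ */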
+
+CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
+{
+ return io_mem_write[io_index >> IO_MEM_SHIFT];
+}
+
+CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
+{
+ return io_mem_read[io_index >> IO_MEM_SHIFT];
+}
+
+#ifdef __ia64__
+
+#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
+#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
+
+/* IA64 has separate I- and D-caches, with coherence maintained by the DMA
+ * controller. To provide the behavior the guest OS expects, we must
+ * flush the I-cache after writing code to memory.
+ */
+static void sync_icache(uint8_t *address, int len)
+{
+ unsigned long addr = (unsigned long)address;
+ unsigned long end = addr + len;
+
+ for (addr &= ~(32UL-1); addr < end; addr += 32UL)
+ __ia64_fc(addr);
+
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+#endif
+
+/* physical memory access (slow version, mainly for debug) */
+#if defined(CONFIG_USER_ONLY)
+void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+ int len, int is_write)
+{
+ int l, flags;
+ target_ulong page;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len)
+ l = len;
+ flags = page_get_flags(page);
+ if (!(flags & PAGE_VALID))
+ return;
+ if (is_write) {
+ if (!(flags & PAGE_WRITE))
+ return;
+ memcpy((uint8_t *)addr, buf, l);
+ } else {
+ if (!(flags & PAGE_READ))
+ return;
+ memcpy(buf, (uint8_t *)addr, l);
+ }
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+}
+#else
+
+int iomem_index(target_phys_addr_t addr)
+{
+ int i;
+
+ for (i = 0; i < mmio_cnt; i++) {
+ unsigned long start, end;
+
+ start = mmio[i].start;
+ end = mmio[i].start + mmio[i].size;
+
+ if ((addr >= start) && (addr < end)){
+ return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ }
+ }
+ return 0;
+}
+
+#if defined(__i386__) || defined(__x86_64__)
+#define phys_ram_addr(x) (qemu_map_cache(x))
+#elif defined(__ia64__)
+#define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : NULL)
+#endif
+
+extern unsigned long *logdirty_bitmap;
+extern unsigned long logdirty_bitmap_size;
+
+/*
+ * Replace the standard byte memcpy with a word memcpy for appropriately sized
+ * memory copy operations. Some users (USB-UHCI) can not tolerate the possible
+ * word tearing that can result from a guest concurrently writing a memory
+ * structure while the qemu device model is modifying the same location.
+ * Forcing a word-sized read/write prevents the guest from seeing a partially
+ * written word-sized atom.
+ */
+#if defined(__x86_64__) || defined(__i386__)
+static void memcpy_words(void *dst, void *src, size_t n)
+{
+ asm volatile (
+ " movl %%edx,%%ecx \n"
+#ifdef __x86_64__
+ " shrl $3,%%ecx \n"
+ " rep movsq \n"
+ " test $4,%%edx \n"
+ " jz 1f \n"
+ " movsl \n"
+#else /* __i386__ */
+ " shrl $2,%%ecx \n"
+ " rep movsl \n"
+#endif
+ "1: test $2,%%edx \n"
+ " jz 1f \n"
+ " movsw \n"
+ "1: test $1,%%edx \n"
+ " jz 1f \n"
+ " movsb \n"
+ "1: \n"
+ : "+S" (src), "+D" (dst) : "d" (n) : "ecx", "memory" );
+}
+#else
+static void memcpy_words(void *dst, void *src, size_t n)
+{
+ /* Some architectures do not like unaligned accesses. */
+ if (((unsigned long)dst | (unsigned long)src) & 3) {
+ memcpy(dst, src, n);
+ return;
+ }
+
+ while (n >= sizeof(uint32_t)) {
+ *((uint32_t *)dst) = *((uint32_t *)src);
+ dst = ((uint32_t *)dst) + 1;
+ src = ((uint32_t *)src) + 1;
+ n -= sizeof(uint32_t);
+ }
+
+ if (n & 2) {
+ *((uint16_t *)dst) = *((uint16_t *)src);
+ dst = ((uint16_t *)dst) + 1;
+ src = ((uint16_t *)src) + 1;
+ }
+
+ if (n & 1) {
+ *((uint8_t *)dst) = *((uint8_t *)src);
+ dst = ((uint8_t *)dst) + 1;
+ src = ((uint8_t *)src) + 1;
+ }
+}
+#endif
+
+void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+ int len, int is_write)
+{
+ int l, io_index;
+ uint8_t *ptr;
+ uint32_t val;
+
+ mapcache_lock();
+
+ while (len > 0) {
+ /* How much can we copy before the next page boundary? */
+ l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
+ if (l > len)
+ l = len;
+
+ io_index = iomem_index(addr);
+ if (is_write) {
+ if (io_index) {
+ if (l >= 4 && ((addr & 3) == 0)) {
+ /* 32 bit write access */
+ val = ldl_raw(buf);
+ io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+ l = 4;
+ } else if (l >= 2 && ((addr & 1) == 0)) {
+ /* 16 bit write access */
+ val = lduw_raw(buf);
+ io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
+ l = 2;
+ } else {
+ /* 8 bit write access */
+ val = ldub_raw(buf);
+ io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
+ l = 1;
+ }
+ } else if ((ptr = phys_ram_addr(addr)) != NULL) {
+ /* Writing to RAM */
+ memcpy_words(ptr, buf, l);
+ if (logdirty_bitmap != NULL) {
+ /* Record that we have dirtied this frame */
+ unsigned long pfn = addr >> TARGET_PAGE_BITS;
+ if (pfn / 8 >= logdirty_bitmap_size) {
+ fprintf(logfile, "dirtying pfn %lx >= bitmap "
+ "size %lx\n", pfn, logdirty_bitmap_size * 8);
+ } else {
+ logdirty_bitmap[pfn / HOST_LONG_BITS]
+ |= 1UL << pfn % HOST_LONG_BITS;
+ }
+ }
+#ifdef __ia64__
+ sync_icache(ptr, l);
+#endif
+ }
+ } else {
+ if (io_index) {
+ if (l >= 4 && ((addr & 3) == 0)) {
+ /* 32 bit read access */
+ val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+ stl_raw(buf, val);
+ l = 4;
+ } else if (l >= 2 && ((addr & 1) == 0)) {
+ /* 16 bit read access */
+ val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+ stw_raw(buf, val);
+ l = 2;
+ } else {
+ /* 8 bit access */
+ val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
+ stb_raw(buf, val);
+ l = 1;
+ }
+ } else if ((ptr = phys_ram_addr(addr)) != NULL) {
+ /* Reading from RAM */
+ memcpy_words(buf, ptr, l);
+ } else {
+ /* Neither RAM nor known MMIO space */
+ memset(buf, 0xff, l);
+ }
+ }
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+
+ mapcache_unlock();
+}
+#endif
+
+/* virtual memory access for debug */
+int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+ uint8_t *buf, int len, int is_write)
+{
+ int l;
+ target_ulong page, phys_addr;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ phys_addr = cpu_get_phys_page_debug(env, page);
+ /* if no physical page mapped, return an error */
+ if (phys_addr == -1)
+ return -1;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len)
+ l = len;
+ cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
+ buf, l, is_write);
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+ return 0;
+}
+
+void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
+ int dirty_flags)
+{
+ unsigned long length;
+ int i, mask, len;
+ uint8_t *p;
+
+ start &= TARGET_PAGE_MASK;
+ end = TARGET_PAGE_ALIGN(end);
+
+ length = end - start;
+ if (length == 0)
+ return;
+ mask = ~dirty_flags;
+ p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
+ len = length >> TARGET_PAGE_BITS;
+ for(i = 0; i < len; i++)
+ p[i] &= mask;
+}
--- /dev/null
+/*
+ * i386 helpers (without register variable usage)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * Main cpu loop for handling I/O requests coming from a virtual machine
+ * Copyright © 2004, Intel Corporation.
+ * Copyright © 2005, International Business Machines Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU Lesser General Public License,
+ * version 2.1, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <assert.h>
+
+#include <limits.h>
+#include <fcntl.h>
+
+#include <xenctrl.h>
+#include <xen/hvm/ioreq.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+
+//#define DEBUG_MMU
+
+#ifdef USE_CODE_COPY
+#include <asm/ldt.h>
+#include <linux/unistd.h>
+#include <linux/version.h>
+
+_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
+#define modify_ldt_ldt_s user_desc
+#endif
+#endif /* USE_CODE_COPY */
+
+#include "vl.h"
+
+int domid = -1;
+int vcpus = 1;
+
+extern int xc_handle;
+
+long time_offset = 0;
+
+shared_iopage_t *shared_page = NULL;
+
+#define BUFFER_IO_MAX_DELAY 100
+buffered_iopage_t *buffered_io_page = NULL;
+QEMUTimer *buffered_io_timer;
+
+/* the evtchn fd for polling */
+int xce_handle = -1;
+
+/* which vcpu we are serving */
+int send_vcpu = 0;
+
+/* the evtchn ports on which ioreq notifications arrive, one per vcpu */
+#define NR_CPUS 32
+evtchn_port_t ioreq_local_port[NR_CPUS];
+
+CPUX86State *cpu_x86_init(void)
+{
+ CPUX86State *env;
+ static int inited;
+ int i, rc;
+
+ env = qemu_mallocz(sizeof(CPUX86State));
+ if (!env)
+ return NULL;
+ cpu_exec_init(env);
+
+ /* init various static tables */
+ if (!inited) {
+ inited = 1;
+
+ cpu_single_env = env;
+
+ xce_handle = xc_evtchn_open();
+ if (xce_handle == -1) {
+ perror("open");
+ return NULL;
+ }
+
+ /* FIXME: what if vcpus exceeds the vcpu_iodata slots in the shared page? */
+ for (i = 0; i < vcpus; i++) {
+ rc = xc_evtchn_bind_interdomain(
+ xce_handle, domid, shared_page->vcpu_iodata[i].vp_eport);
+ if (rc == -1) {
+ fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
+ return NULL;
+ }
+ ioreq_local_port[i] = rc;
+ }
+ }
+
+ return env;
+}
+
+/* called from main_cpu_reset */
+void cpu_reset(CPUX86State *env)
+{
+ int xcHandle;
+ int sts;
+
+ xcHandle = xc_interface_open();
+ if (xcHandle < 0)
+ fprintf(logfile, "Cannot acquire xenctrl handle\n");
+ else {
+ xc_domain_shutdown_hook(xcHandle, domid);
+ sts = xc_domain_shutdown(xcHandle, domid, SHUTDOWN_reboot);
+ if (sts != 0)
+ fprintf(logfile,
+ "? xc_domain_shutdown failed to issue reboot, sts %d\n",
+ sts);
+ else
+ fprintf(logfile, "Issued domain %d reboot\n", domid);
+ xc_interface_close(xcHandle);
+ }
+}
+
+void cpu_x86_close(CPUX86State *env)
+{
+ free(env);
+}
+
+
+void cpu_dump_state(CPUState *env, FILE *f,
+ int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
+ int flags)
+{
+}
+
+/***********************************************************/
+/* x86 mmu */
+/* XXX: add PGE support */
+
+void cpu_x86_set_a20(CPUX86State *env, int a20_state)
+{
+ a20_state = (a20_state != 0);
+ if (a20_state != ((env->a20_mask >> 20) & 1)) {
+#if defined(DEBUG_MMU)
+ printf("A20 update: a20=%d\n", a20_state);
+#endif
+ env->a20_mask = 0xffefffff | (a20_state << 20);
+ }
+}
+
+target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+{
+ return addr;
+}
+
+/* dump each vcpu's pending I/O request state (monitor command) */
+void sp_info(void)
+{
+ ioreq_t *req;
+ int i;
+
+ for (i = 0; i < vcpus; i++) {
+ req = &(shared_page->vcpu_iodata[i].vp_ioreq);
+ term_printf("vcpu %d: event port %d\n", i, ioreq_local_port[i]);
+ term_printf(" req state: %x, ptr: %x, addr: %"PRIx64", "
+ "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
+ req->state, req->data_is_ptr, req->addr,
+ req->data, req->count, req->size);
+ term_printf(" IO totally occurred on this vcpu: %"PRIx64"\n",
+ req->io_count);
+ }
+}
+
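+/* An I/O request moves through three states shared with Xen:
+ *   STATE_IOREQ_READY     -- posted by the hypervisor for us to handle
+ *   STATE_IOREQ_INPROCESS -- claimed by this device model
+ *   STATE_IORESP_READY    -- response written back; the vcpu may resume
+ * Memory barriers (xen_rmb/xen_wmb) order accesses to the ioreq contents
+ * against these state transitions. */
+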
+/* fetch a vcpu's pending I/O request from the shared page */
+static ioreq_t *__cpu_get_ioreq(int vcpu)
+{
+ ioreq_t *req;
+
+ req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);
+
+ if (req->state != STATE_IOREQ_READY) {
+ fprintf(logfile, "I/O request not ready: "
+ "%x, ptr: %x, port: %"PRIx64", "
+ "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
+ req->state, req->data_is_ptr, req->addr,
+ req->data, req->count, req->size);
+ return NULL;
+ }
+
+ xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
+
+ req->state = STATE_IOREQ_INPROCESS;
+ return req;
+}
+
+/* poll the event channel for a notification and return the pending
+ * I/O request, or NULL if none is ready */
+static ioreq_t *cpu_get_ioreq(void)
+{
+ int i;
+ evtchn_port_t port;
+
+ port = xc_evtchn_pending(xce_handle);
+ if (port != -1) {
+ for ( i = 0; i < vcpus; i++ )
+ if ( ioreq_local_port[i] == port )
+ break;
+
+ if ( i == vcpus ) {
+ fprintf(logfile, "Fatal error while trying to get io event!\n");
+ exit(1);
+ }
+
+ // unmask the wanted port again
+ xc_evtchn_unmask(xce_handle, port);
+
+ //get the io packet from shared memory
+ send_vcpu = i;
+ return __cpu_get_ioreq(i);
+ }
+
+ //read error or nothing pending
+ return NULL;
+}
+
+unsigned long do_inp(CPUState *env, unsigned long addr, unsigned long size)
+{
+ switch(size) {
+ case 1:
+ return cpu_inb(env, addr);
+ case 2:
+ return cpu_inw(env, addr);
+ case 4:
+ return cpu_inl(env, addr);
+ default:
+ fprintf(logfile, "inp: bad size: %lx %lx\n", addr, size);
+ exit(-1);
+ }
+}
+
+void do_outp(CPUState *env, unsigned long addr,
+ unsigned long size, unsigned long val)
+{
+ switch(size) {
+ case 1:
+ return cpu_outb(env, addr, val);
+ case 2:
+ return cpu_outw(env, addr, val);
+ case 4:
+ return cpu_outl(env, addr, val);
+ default:
+ fprintf(logfile, "outp: bad size: %lx %lx\n", addr, size);
+ exit(-1);
+ }
+}
+
+extern void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+ int len, int is_write);
+
+static inline void read_physical(uint64_t addr, unsigned long size, void *val)
+{
+ cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
+}
+
+static inline void write_physical(uint64_t addr, unsigned long size, void *val)
+{
+ cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
+}
+
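+/* 'data_is_ptr' distinguishes an immediate data value from a
+ * guest-physical pointer used by string operations; 'df' is the
+ * direction flag, so 'sign' walks the guest buffer up or down. */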
+void cpu_ioreq_pio(CPUState *env, ioreq_t *req)
+{
+ int i, sign;
+
+ sign = req->df ? -1 : 1;
+
+ if (req->dir == IOREQ_READ) {
+ if (!req->data_is_ptr) {
+ req->data = do_inp(env, req->addr, req->size);
+ } else {
+ unsigned long tmp;
+
+ for (i = 0; i < req->count; i++) {
+ tmp = do_inp(env, req->addr, req->size);
+ write_physical((target_phys_addr_t) req->data
+ + (sign * i * req->size),
+ req->size, &tmp);
+ }
+ }
+ } else if (req->dir == IOREQ_WRITE) {
+ if (!req->data_is_ptr) {
+ do_outp(env, req->addr, req->size, req->data);
+ } else {
+ for (i = 0; i < req->count; i++) {
+ unsigned long tmp = 0;
+
+ read_physical((target_phys_addr_t) req->data
+ + (sign * i * req->size),
+ req->size, &tmp);
+ do_outp(env, req->addr, req->size, tmp);
+ }
+ }
+ }
+}
+
+void cpu_ioreq_move(CPUState *env, ioreq_t *req)
+{
+ int i, sign;
+
+ sign = req->df ? -1 : 1;
+
+ if (!req->data_is_ptr) {
+ if (req->dir == IOREQ_READ) {
+ for (i = 0; i < req->count; i++) {
+ read_physical(req->addr
+ + (sign * i * req->size),
+ req->size, &req->data);
+ }
+ } else if (req->dir == IOREQ_WRITE) {
+ for (i = 0; i < req->count; i++) {
+ write_physical(req->addr
+ + (sign * i * req->size),
+ req->size, &req->data);
+ }
+ }
+ } else {
+ target_ulong tmp;
+
+ if (req->dir == IOREQ_READ) {
+ for (i = 0; i < req->count; i++) {
+ read_physical(req->addr
+ + (sign * i * req->size),
+ req->size, &tmp);
+ write_physical((target_phys_addr_t )req->data
+ + (sign * i * req->size),
+ req->size, &tmp);
+ }
+ } else if (req->dir == IOREQ_WRITE) {
+ for (i = 0; i < req->count; i++) {
+ read_physical((target_phys_addr_t) req->data
+ + (sign * i * req->size),
+ req->size, &tmp);
+ write_physical(req->addr
+ + (sign * i * req->size),
+ req->size, &tmp);
+ }
+ }
+ }
+}
+
+void timeoffset_get(void)
+{
+ char *p;
+
+ p = xenstore_vm_read(domid, "rtc/timeoffset", NULL);
+ if (!p)
+ return;
+
+ if (sscanf(p, "%ld", &time_offset) == 1)
+ fprintf(logfile, "Time offset set %ld\n", time_offset);
+ else
+ time_offset = 0;
+
+ xc_domain_set_time_offset(xc_handle, domid, time_offset);
+
+ free(p);
+}
+
+void cpu_ioreq_timeoffset(CPUState *env, ioreq_t *req)
+{
+ char b[64];
+
+ time_offset += (unsigned long)req->data;
+
+ fprintf(logfile, "Time offset set %ld, added offset %ld\n", time_offset, req->data);
+ sprintf(b, "%ld", time_offset);
+ xenstore_vm_write(domid, "rtc/timeoffset", b);
+}
+
+void __handle_ioreq(CPUState *env, ioreq_t *req)
+{
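+ /* a write narrower than target_ulong may carry stale upper bits,
+    so mask the payload down to the request size */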
+ if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
+ (req->size < sizeof(target_ulong)))
+ req->data &= ((target_ulong)1 << (8 * req->size)) - 1;
+
+ switch (req->type) {
+ case IOREQ_TYPE_PIO:
+ cpu_ioreq_pio(env, req);
+ break;
+ case IOREQ_TYPE_COPY:
+ cpu_ioreq_move(env, req);
+ break;
+ case IOREQ_TYPE_TIMEOFFSET:
+ cpu_ioreq_timeoffset(env, req);
+ break;
+ case IOREQ_TYPE_INVALIDATE:
+ qemu_invalidate_map_cache();
+ break;
+ default:
+ hw_error("Invalid ioreq type 0x%x\n", req->type);
+ }
+}
+
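+/* Drain the ring of buffered (asynchronous) I/O requests shared with
+ * Xen: read_pointer chases write_pointer through IOREQ_BUFFER_SLOT_NUM
+ * slots, and an 8-byte request occupies two consecutive slots, the
+ * second slot holding the high 32 bits of the data. */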
+void __handle_buffered_iopage(CPUState *env)
+{
+ buf_ioreq_t *buf_req = NULL;
+ ioreq_t req;
+ int qw;
+
+ if (!buffered_io_page)
+ return;
+
+ while (buffered_io_page->read_pointer !=
+ buffered_io_page->write_pointer) {
+ buf_req = &buffered_io_page->buf_ioreq[
+ buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
+ req.size = 1UL << buf_req->size;
+ req.count = 1;
+ req.addr = buf_req->addr;
+ req.data = buf_req->data;
+ req.state = STATE_IOREQ_READY;
+ req.dir = buf_req->dir;
+ req.df = 1;
+ req.type = buf_req->type;
+ req.data_is_ptr = 0;
+ qw = (req.size == 8);
+ if (qw) {
+ buf_req = &buffered_io_page->buf_ioreq[
+ (buffered_io_page->read_pointer+1) % IOREQ_BUFFER_SLOT_NUM];
+ req.data |= ((uint64_t)buf_req->data) << 32;
+ }
+
+ __handle_ioreq(env, &req);
+
+ xen_mb();
+ buffered_io_page->read_pointer += qw ? 2 : 1;
+ }
+}
+
+void handle_buffered_io(void *opaque)
+{
+ CPUState *env = opaque;
+
+ __handle_buffered_iopage(env);
+ qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
+ qemu_get_clock(rt_clock));
+}
+
+void cpu_handle_ioreq(void *opaque)
+{
+ extern int vm_running;
+ extern int shutdown_requested;
+ extern int reset_requested;
+ CPUState *env = opaque;
+ ioreq_t *req = cpu_get_ioreq();
+
+ handle_buffered_io(env);
+ if (req) {
+ __handle_ioreq(env, req);
+
+ if (req->state != STATE_IOREQ_INPROCESS) {
+ fprintf(logfile, "Badness in I/O request ... not in service?!: "
+ "%x, ptr: %x, port: %"PRIx64", "
+ "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
+ req->state, req->data_is_ptr, req->addr,
+ req->data, req->count, req->size);
+ destroy_hvm_domain();
+ return;
+ }
+
+ xen_wmb(); /* Update ioreq contents /then/ update state. */
+
+ /*
+ * We do this before we send the response so that the tools
+ * have the opportunity to pick up on the reset before the
+ * guest resumes and does a hlt with interrupts disabled which
+ * causes Xen to powerdown the domain.
+ */
+ if (vm_running) {
+ if (shutdown_requested) {
+ fprintf(logfile, "shutdown requested in cpu_handle_ioreq\n");
+ destroy_hvm_domain();
+ }
+ if (reset_requested) {
+ fprintf(logfile, "reset requested in cpu_handle_ioreq.\n");
+ qemu_system_reset();
+ reset_requested = 0;
+ }
+ }
+
+ req->state = STATE_IORESP_READY;
+ xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
+ }
+}
+
+int main_loop(void)
+{
+ extern int vm_running;
+ extern int shutdown_requested;
+ extern int suspend_requested;
+ CPUState *env = cpu_single_env;
+ int evtchn_fd = xce_handle == -1 ? -1 : xc_evtchn_fd(xce_handle);
+ char *qemu_file;
+ fd_set fds;
+
+ buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
+ cpu_single_env);
+ qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
+
+ if (evtchn_fd != -1)
+ qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
+
+ xenstore_record_dm_state("running");
+ while (1) {
+ while (!(vm_running && suspend_requested))
+ /* Wait up to 10 msec. */
+ main_loop_wait(10);
+
+ fprintf(logfile, "device model saving state\n");
+
+ /* Pull all outstanding ioreqs through the system */
+ handle_buffered_pio();
+ handle_buffered_io(env);
+ main_loop_wait(1); /* For the select() on events */
+
+ /* Save the device state */
+ asprintf(&qemu_file, "/var/lib/xen/qemu-save.%d", domid);
+ do_savevm(qemu_file);
+ free(qemu_file);
+
+ xenstore_record_dm_state("paused");
+
+ /* Wait to be allowed to continue */
+ while (suspend_requested) {
+ FD_ZERO(&fds);
+ FD_SET(xenstore_fd(), &fds);
+ if (select(xenstore_fd() + 1, &fds, NULL, NULL, NULL) > 0)
+ xenstore_process_event(NULL);
+ }
+
+ xenstore_record_dm_state("running");
+ }
+
+ return 0;
+}
+
+void destroy_hvm_domain(void)
+{
+ int xcHandle;
+ int sts;
+
+ xcHandle = xc_interface_open();
+ if (xcHandle < 0)
+ fprintf(logfile, "Cannot acquire xenctrl handle\n");
+ else {
+ sts = xc_domain_shutdown(xcHandle, domid, SHUTDOWN_poweroff);
+ if (sts != 0)
+ fprintf(logfile, "? xc_domain_shutdown failed to issue poweroff, "
+ "sts %d, errno %d\n", sts, errno);
+ else
+ fprintf(logfile, "Issued domain %d poweroff\n", domid);
+ xc_interface_close(xcHandle);
+ }
+}
--- /dev/null
+/* Xen 8259 stub for interrupt controller emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2005 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "vl.h"
+#include "xenctrl.h"
+#include <xen/hvm/ioreq.h>
+#include <stdio.h>
+#include "cpu.h"
+#include "cpu-all.h"
+
+/* stub type: the real PIC state is maintained by Xen */
+struct PicState2 {
+};
+
+void pic_set_irq_new(void *opaque, int irq, int level)
+{
+ xc_hvm_set_isa_irq_level(xc_handle, domid, irq, level);
+}
+
+/* obsolete function */
+void pic_set_irq(int irq, int level)
+{
+ pic_set_irq_new(isa_pic, irq, level);
+}
+
+void irq_info(void)
+{
+ term_printf("irq statistic code not compiled.\n");
+}
+
+void pic_info(void)
+{
+ term_printf("pic_info code not compiled.\n");
+}
+
+PicState2 *pic_init(IRQRequestFunc *irq_request, void *irq_request_opaque)
+{
+ PicState2 *s;
+ s = qemu_mallocz(sizeof(PicState2));
+ if (!s)
+ return NULL;
+ return s;
+}
+
+void pic_set_alt_irq_func(PicState2 *s, SetIRQFunc *alt_irq_func,
+ void *alt_irq_opaque)
+{
+}
--- /dev/null
+/*
+ * QEMU i440FX/PIIX3 PCI Bridge Emulation
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "vl.h"
+typedef uint32_t pci_addr_t;
+#include "hw/pci_host.h"
+
+typedef PCIHostState I440FXState;
+
+static void i440fx_addr_writel(void* opaque, uint32_t addr, uint32_t val)
+{
+ I440FXState *s = opaque;
+ s->config_reg = val;
+}
+
+static uint32_t i440fx_addr_readl(void* opaque, uint32_t addr)
+{
+ I440FXState *s = opaque;
+ return s->config_reg;
+}
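+
+/* The 0xcf8/0xcfc handlers implement PCI configuration mechanism #1:
+ * a 32-bit write to CONFIG_ADDRESS (0xcf8) latches the target
+ * bus/device/function/register, and accesses to CONFIG_DATA (0xcfc)
+ * are forwarded to that device's configuration space by the pci_host
+ * helpers. */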
+
+/* return the global irq number corresponding to a given device irq
+ pin. We could also use the bus number to have a more precise
+ mapping. */
+static int pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
+{
+#ifndef CONFIG_DM
+ int slot_addend;
+ slot_addend = (pci_dev->devfn >> 3) - 1;
+ return (irq_num + slot_addend) & 3;
+#else /* !CONFIG_DM */
+ return irq_num + ((pci_dev->devfn >> 3) << 2);
+#endif /* !CONFIG_DM */
+}
+
+static void i440fx_set_irq(void *pic, int irq_num, int level)
+{
+ xc_hvm_set_pci_intx_level(xc_handle, domid, 0, 0, irq_num >> 2,
+ irq_num & 3, level);
+}
+
+static void i440fx_save(QEMUFile* f, void *opaque)
+{
+ PCIDevice *d = opaque;
+ pci_device_save(d, f);
+#ifndef CONFIG_DM
+ qemu_put_8s(f, &smm_enabled);
+#endif /* !CONFIG_DM */
+}
+
+static int i440fx_load(QEMUFile* f, void *opaque, int version_id)
+{
+ PCIDevice *d = opaque;
+ int ret;
+
+ if (version_id != 1)
+ return -EINVAL;
+ ret = pci_device_load(d, f);
+ if (ret < 0)
+ return ret;
+#ifndef CONFIG_DM
+ i440fx_update_memory_mappings(d);
+ qemu_get_8s(f, &smm_enabled);
+#endif /* !CONFIG_DM */
+ return 0;
+}
+
+PCIBus *i440fx_init(PCIDevice **pi440fx_state)
+{
+ PCIBus *b;
+ PCIDevice *d;
+ I440FXState *s;
+
+ s = qemu_mallocz(sizeof(I440FXState));
+ b = pci_register_bus(i440fx_set_irq, pci_slot_get_pirq, NULL, 0, 128);
+ s->bus = b;
+
+ register_ioport_write(0xcf8, 4, 4, i440fx_addr_writel, s);
+ register_ioport_read(0xcf8, 4, 4, i440fx_addr_readl, s);
+
+ register_ioport_write(0xcfc, 4, 1, pci_host_data_writeb, s);
+ register_ioport_write(0xcfc, 4, 2, pci_host_data_writew, s);
+ register_ioport_write(0xcfc, 4, 4, pci_host_data_writel, s);
+ register_ioport_read(0xcfc, 4, 1, pci_host_data_readb, s);
+ register_ioport_read(0xcfc, 4, 2, pci_host_data_readw, s);
+ register_ioport_read(0xcfc, 4, 4, pci_host_data_readl, s);
+
+ d = pci_register_device(b, "i440FX", sizeof(PCIDevice), 0,
+ NULL, NULL);
+
+ d->config[0x00] = 0x86; // vendor_id
+ d->config[0x01] = 0x80;
+ d->config[0x02] = 0x37; // device_id
+ d->config[0x03] = 0x12;
+ d->config[0x08] = 0x02; // revision
+ d->config[0x0a] = 0x00; // class_sub = host2pci
+ d->config[0x0b] = 0x06; // class_base = PCI_bridge
+ d->config[0x0e] = 0x00; // header_type
+
+ register_savevm("I440FX", 0, 1, i440fx_save, i440fx_load, d);
+ *pi440fx_state = d;
+ return b;
+}
+
+/* PIIX3 PCI to ISA bridge */
+
+static PCIDevice *piix3_dev;
+
+static void piix3_write_config(PCIDevice *d,
+ uint32_t address, uint32_t val, int len)
+{
+ int i;
+
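+ /* PIRQRC[A:D] live at 0x60-0x63: the low nibble selects the ISA IRQ
+    a PCI interrupt link is routed to, and bit 7 disables routing,
+    which is forwarded to Xen here as a route of 0. */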
+ /* Scan for updates to PCI link routes (0x60-0x63). */
+ for (i = 0; i < len; i++) {
+ uint8_t v = (val >> (8*i)) & 0xff;
+ if (v & 0x80)
+ v = 0;
+ v &= 0xf;
+ if (((address+i) >= 0x60) && ((address+i) <= 0x63))
+ xc_hvm_set_pci_link_route(xc_handle, domid, address + i - 0x60, v);
+ }
+
+ /* Hand off to default logic. */
+ pci_default_write_config(d, address, val, len);
+}
+
+static void piix3_reset(PCIDevice *d)
+{
+ uint8_t *pci_conf = d->config;
+
+ pci_conf[0x04] = 0x07; // master, memory and I/O
+ pci_conf[0x05] = 0x00;
+ pci_conf[0x06] = 0x00;
+ pci_conf[0x07] = 0x02; // PCI_status_devsel_medium
+ pci_conf[0x4c] = 0x4d;
+ pci_conf[0x4e] = 0x03;
+ pci_conf[0x4f] = 0x00;
+ pci_conf[0x60] = 0x80;
+ pci_conf[0x61] = 0x80;
+ pci_conf[0x62] = 0x80;
+ pci_conf[0x63] = 0x80;
+ pci_conf[0x69] = 0x02;
+ pci_conf[0x70] = 0x80;
+ pci_conf[0x76] = 0x0c;
+ pci_conf[0x77] = 0x0c;
+ pci_conf[0x78] = 0x02;
+ pci_conf[0x79] = 0x00;
+ pci_conf[0x80] = 0x00;
+ pci_conf[0x82] = 0x00;
+ pci_conf[0xa0] = 0x08;
+ pci_conf[0xa2] = 0x00;
+ pci_conf[0xa3] = 0x00;
+ pci_conf[0xa4] = 0x00;
+ pci_conf[0xa5] = 0x00;
+ pci_conf[0xa6] = 0x00;
+ pci_conf[0xa7] = 0x00;
+ pci_conf[0xa8] = 0x0f;
+ pci_conf[0xaa] = 0x00;
+ pci_conf[0xab] = 0x00;
+ pci_conf[0xac] = 0x00;
+ pci_conf[0xae] = 0x00;
+}
+
+static void piix_save(QEMUFile* f, void *opaque)
+{
+ PCIDevice *d = opaque;
+ pci_device_save(d, f);
+}
+
+static int piix_load(QEMUFile* f, void *opaque, int version_id)
+{
+ PCIDevice *d = opaque;
+ if (version_id != 2)
+ return -EINVAL;
+ return pci_device_load(d, f);
+}
+
+int piix3_init(PCIBus *bus, int devfn)
+{
+ PCIDevice *d;
+ uint8_t *pci_conf;
+
+ d = pci_register_device(bus, "PIIX3", sizeof(PCIDevice),
+ devfn, NULL, piix3_write_config);
+ register_savevm("PIIX3", 0, 2, piix_save, piix_load, d);
+
+ piix3_dev = d;
+ pci_conf = d->config;
+
+ pci_conf[0x00] = 0x86; // Intel
+ pci_conf[0x01] = 0x80;
+ pci_conf[0x02] = 0x00; // 82371SB PIIX3 PCI-to-ISA bridge (Step A1)
+ pci_conf[0x03] = 0x70;
+ pci_conf[0x0a] = 0x01; // class_sub = PCI_ISA
+ pci_conf[0x0b] = 0x06; // class_base = PCI_bridge
+ pci_conf[0x0e] = 0x80; // header_type = PCI_multifunction, generic
+
+ piix3_reset(d);
+ return d->devfn;
+}
--- /dev/null
+/*
+ * QEMU MC146818 RTC emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "vl.h"
+
+// #define DEBUG_CMOS
+
+struct RTCState {
+ uint8_t cmos_data[128];
+ uint8_t cmos_index;
+};
+
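+/* binary to BCD, as the CMOS stores dates, e.g. to_bcd(s, 59) == 0x59 */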
+static inline int to_bcd(RTCState *s, int a)
+{
+ return ((a / 10) << 4) | (a % 10);
+}
+
+void rtc_set_memory(RTCState *s, int addr, int val)
+{
+ if (addr >= 0 && addr <= 127)
+ s->cmos_data[addr] = val;
+}
+
+static void cmos_ioport_write(void *opaque, uint32_t addr, uint32_t data)
+{
+ RTCState *s = opaque;
+
+ if ((addr & 1) == 0) {
+ s->cmos_index = data & 0x7f;
+ } else {
+#ifdef DEBUG_CMOS
+ printf("cmos: write index=0x%02x val=0x%02x\n",
+ s->cmos_index, data);
+#endif
+ s->cmos_data[s->cmos_index] = data;
+ }
+}
+
+static uint32_t cmos_ioport_read(void *opaque, uint32_t addr)
+{
+ RTCState *s = opaque;
+ int ret;
+ if ((addr & 1) == 0) {
+ return 0xff;
+ } else {
+ ret = s->cmos_data[s->cmos_index];
+#ifdef DEBUG_CMOS
+ printf("cmos: read index=0x%02x val=0x%02x\n",
+ s->cmos_index, ret);
+#endif
+ return ret;
+ }
+}
+
+static void rtc_save(QEMUFile *f, void *opaque)
+{
+ RTCState *s = opaque;
+
+ qemu_put_buffer(f, s->cmos_data, 128);
+ qemu_put_8s(f, &s->cmos_index);
+}
+
+static int rtc_load(QEMUFile *f, void *opaque, int version_id)
+{
+ RTCState *s = opaque;
+
+ if (version_id != 1)
+ return -EINVAL;
+
+ qemu_get_buffer(f, s->cmos_data, 128);
+ qemu_get_8s(f, &s->cmos_index);
+
+ return 0;
+}
+
+RTCState *rtc_init(int base, int irq)
+{
+ RTCState *s;
+ time_t ti;
+ struct tm *tm;
+ int val;
+
+ s = qemu_mallocz(sizeof(RTCState));
+ if (!s)
+ return NULL;
+
+/* PC cmos mappings */
+#define REG_IBM_CENTURY_BYTE 0x32
+#define REG_IBM_PS2_CENTURY_BYTE 0x37
+ time(&ti);
+ tm = gmtime(&ti); /* XXX localtime and update from guest? */
+ val = to_bcd(s, (tm->tm_year / 100) + 19);
+ rtc_set_memory(s, REG_IBM_CENTURY_BYTE, val);
+ rtc_set_memory(s, REG_IBM_PS2_CENTURY_BYTE, val);
+
+ register_ioport_write(base, 2, 1, cmos_ioport_write, s);
+ register_ioport_read(base, 2, 1, cmos_ioport_read, s);
+
+ register_savevm("mc146818rtc", base, 1, rtc_save, rtc_load, s);
+ return s;
+}
+
+void rtc_set_date(RTCState *s, const struct tm *tm) {}