recurse-all: $(patsubst %,subdir-%, $(TARGET_DIRS))
+tapdisk-ioemu: CPPFLAGS += -I$(XEN_ROOT)/tools/libxc
+tapdisk-ioemu: CPPFLAGS += -I$(XEN_ROOT)/tools/blktap/lib
+tapdisk-ioemu: CPPFLAGS += -I$(XEN_ROOT)/tools/xenstore
+tapdisk-ioemu: CPPFLAGS += -I$(XEN_ROOT)/tools/include
+tapdisk-ioemu: tapdisk-ioemu.c cutils.c block.c block-raw.c block-cow.c block-qcow.c aes.c block-vmdk.c block-cloop.c block-dmg.c block-bochs.c block-vpc.c block-vvfat.c block-qcow2.c hw/xen_blktap.c osdep.c
+ $(CC) -DQEMU_TOOL $(CFLAGS) $(CPPFLAGS) $(BASE_CFLAGS) $(LDFLAGS) $(BASE_LDFLAGS) -o $@ $^ -lz $(LIBS)
+
#######################################################################
# BLOCK_OBJS is code used by both qemu system emulation and qemu-img
return 0;
}
-static void cow_flush(BlockDriverState *bs)
+static int cow_flush(BlockDriverState *bs)
{
BDRVCowState *s = bs->opaque;
fsync(s->fd);
+ return 0;
}
BlockDriver bdrv_cow = {
.bdrv_aio_read = raw_aio_read,
.bdrv_aio_write = raw_aio_write,
.bdrv_aio_cancel = raw_aio_cancel,
+ .bdrv_aio_flush = raw_aio_flush,
.aiocb_size = sizeof(RawAIOCB),
.protocol_name = "file",
.bdrv_pread = raw_pread,
typedef struct BDRVVbdState {
struct blkfront_dev *dev;
int fd;
- int type;
- int mode;
- int info;
- uint64_t sectors;
- unsigned sector_size;
+ struct blkfront_info info;
QEMU_LIST_ENTRY(BDRVVbdState) list;
} BDRVVbdState;
//handy to test posix access
//return -EIO;
- s->dev = init_blkfront((char *) filename, &s->sectors, &s->sector_size, &s->mode, &s->info);
+ s->dev = init_blkfront((char *) filename, &s->info);
if (!s->dev)
return -EIO;
- if (SECTOR_SIZE % s->sector_size) {
- printf("sector size is %d, we only support sector sizes that divide %d\n", s->sector_size, SECTOR_SIZE);
+ if (SECTOR_SIZE % s->info.sector_size) {
+ printf("sector size is %d, we only support sector sizes that divide %d\n", s->info.sector_size, SECTOR_SIZE);
return -EIO;
}
// Try to cancel. If can't, wait for it, drop the callback and call qemu_aio_release(acb)
}
+/* Completion callback that discards its result: used for the barrier
+ * when a cache flush will follow, so only the final operation invokes
+ * the caller's callback. */
+static void vbd_nop_cb(void *opaque, int ret)
+{
+}
+
+/* Issue an asynchronous flush to the blkfront device.  Pushes a write
+ * barrier and/or a flush-diskcache operation depending on what the
+ * backend advertises in s->info.  Returns NULL on failure or when the
+ * backend supports neither operation. */
+static BlockDriverAIOCB *vbd_aio_flush(BlockDriverState *bs,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ BDRVVbdState *s = bs->opaque;
+ VbdAIOCB *acb = NULL;
+
+ if (s->info.barrier == 1) {
+ acb = vbd_aio_setup(bs, 0, NULL, 0,
+ s->info.flush == 1 ? vbd_nop_cb : cb, opaque);
+ if (!acb)
+ return NULL;
+ blkfront_aio_push_operation(&acb->aiocb, BLKIF_OP_WRITE_BARRIER);
+ }
+ if (s->info.flush == 1) {
+ acb = vbd_aio_setup(bs, 0, NULL, 0, cb, opaque);
+ if (!acb)
+ return NULL;
+ blkfront_aio_push_operation(&acb->aiocb, BLKIF_OP_FLUSH_DISKCACHE);
+ }
+ /* If the backend supports neither barrier nor flush, acb is still
+ * NULL here; taking &acb->common from a null pointer is undefined
+ * behaviour, so bail out explicitly. */
+ if (!acb)
+ return NULL;
+ return &acb->common;
+}
+
static void vbd_close(BlockDriverState *bs)
{
BDRVVbdState *s = bs->opaque;
static int64_t vbd_getlength(BlockDriverState *bs)
{
BDRVVbdState *s = bs->opaque;
- return s->sectors * s->sector_size;
+ return s->info.sectors * s->info.sector_size;
}
-static void vbd_flush(BlockDriverState *bs)
+static int vbd_flush(BlockDriverState *bs)
{
BDRVVbdState *s = bs->opaque;
blkfront_sync(s->dev);
+ return 0;
}
/***********************************************/
.bdrv_aio_read = vbd_aio_read,
.bdrv_aio_write = vbd_aio_write,
.bdrv_aio_cancel = vbd_aio_cancel,
+ .bdrv_aio_flush = vbd_aio_flush,
.aiocb_size = sizeof(VbdAIOCB),
.bdrv_read = vbd_read,
.bdrv_write = vbd_write,
}
#endif
+/* Open handler that always fails with -ENOENT; any open attempt via an
+ * unrecognised protocol therefore reports "no such file". */
+static int bdrv_invalid_protocol_open(BlockDriverState *bs,
+ const char *filename, int flags) {
+ return -ENOENT;
+}
+
+/* Sentinel driver returned by find_protocol() when the filename names a
+ * protocol no registered BlockDriver claims. */
+static BlockDriver bdrv_invalid_protocol = {
+ "invalid_protocol",
+ .bdrv_open = bdrv_invalid_protocol_open,
+};
+
static BlockDriver *find_protocol(const char *filename)
{
+ /* Return values:
+ * &bdrv_xxx
+ * filename specifies protocol xxx
+ * caller should use that
+ * NULL filename does not specify any protocol
+ * caller may apply their own default
+ * &bdrv_invalid_protocol filename specifies an unknown protocol
+ * caller should return -ENOENT; or may just try to open with
+ * that bdrv, which always fails that way.
+ */
BlockDriver *drv1;
char protocol[128];
int len;
#endif
p = strchr(filename, ':');
if (!p)
- return &bdrv_raw;
+ return NULL;
len = p - filename;
if (len > sizeof(protocol) - 1)
len = sizeof(protocol) - 1;
!strcmp(drv1->protocol_name, protocol))
return drv1;
}
- return NULL;
+ return &bdrv_invalid_protocol;
}
/* XXX: force raw format if block or character device ? It would
#endif
drv = find_protocol(filename);
- /* no need to test disk image formats for vvfat */
- if (drv == &bdrv_vvfat)
+ /* no need to test disk image format if the filename told us */
+ if (drv != NULL)
return drv;
ret = bdrv_file_open(&bs, filename, BDRV_O_RDONLY);
if (flags & BDRV_O_FILE) {
drv = find_protocol(filename);
if (!drv)
- return -ENOENT;
+ drv = &bdrv_raw;
} else {
if (!drv) {
drv = find_image_format(filename);
}
path_combine(backing_filename, sizeof(backing_filename),
filename, bs->backing_file);
- if (bdrv_open(bs->backing_hd, backing_filename, 0) < 0)
+ if (bdrv_open2(bs->backing_hd, backing_filename, 0, &bdrv_raw) < 0)
goto fail;
}
return buf;
}
+/* Issue an asynchronous cache flush through the attached driver.
+ * Returns the AIO control block, or NULL when no driver is attached or
+ * the driver does not implement asynchronous flush. */
+BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ BlockDriver *drv = bs->drv;
+
+ if (!drv)
+ return NULL;
+
+ /* Not every BlockDriver provides bdrv_aio_flush; avoid calling
+ * through a null function pointer. */
+ if (!drv->bdrv_aio_flush)
+ return NULL;
+
+ return drv->bdrv_aio_flush(bs, cb, opaque);
+}
+
/**************************************************************/
/* async I/Os */
;;
--disable-sdl) sdl="no"
;;
+ --disable-opengl) opengl="no"
+ ;;
--enable-coreaudio) coreaudio="yes"
;;
--enable-alsa) alsa="yes"
vnc_tls_libs=`pkg-config --libs gnutls`
fi
+##########################################
+# OpenGL test
+
+if test -z "$opengl" && test "$sdl" = "yes"
+then
+cat > $TMPC << EOF
+#include <SDL_opengl.h>
+#ifndef GL_TEXTURE_RECTANGLE_ARB
+#error "Opengl doesn't support GL_TEXTURE_RECTANGLE_ARB"
+#endif
+int main( void ) { return (int) glGetString(GL_EXTENSIONS); }
+EOF
+if $cc -o $TMPE `$sdl_config --cflags --libs 2> /dev/null` -I/usr/include/GL $TMPC -lXext -lGL 2> /dev/null
+then
+opengl="yes"
+else
+opengl="no"
+fi
+fi
+
##########################################
# alsa sound support libraries
/* set to 1 set disable mult support */
#define MAX_MULT_SECTORS 16
+#ifdef CONFIG_STUBDOM
+#include <xen/io/blkif.h>
+#define IDE_DMA_BUF_SIZE (BLKIF_MAX_SEGMENTS_PER_REQUEST * TARGET_PAGE_SIZE)
+#else
+#define IDE_DMA_BUF_SIZE 131072
+#endif
+#if (IDE_DMA_BUF_SIZE < MAX_MULT_SECTORS * 512)
+#error "IDE_DMA_BUF_SIZE must be bigger or equal to MAX_MULT_SECTORS * 512"
+#endif
#ifdef CONFIG_STUBDOM
#include <xen/io/blkif.h>
#define ASC_ILLEGAL_OPCODE 0x20
#define ASC_LOGICAL_BLOCK_OOR 0x21
#define ASC_INV_FIELD_IN_CMD_PACKET 0x24
+#define ASC_MEDIUM_MAY_HAVE_CHANGED 0x28
#define ASC_MEDIUM_NOT_PRESENT 0x3a
#define ASC_SAVING_PARAMETERS_NOT_SUPPORTED 0x39
if (!s->bs) return; /* yikes */
+ if (!s->bs) return; /* yikes */
+
n = s->io_buffer_size >> 9;
sector_num = ide_get_sector(s);
if (n > 0) {
if (!s->bs) return; /* yikes */
+ if (!s->bs) return; /* yikes */
+
n = s->io_buffer_size >> 9;
sector_num = ide_get_sector(s);
if (n > 0) {
ide_set_irq(s);
}
+/* Take the emulated drive permanently offline after an unrecoverable
+ * error: mark it busy and drop the backing BlockDriverState. */
+static void ide_device_utterly_broken(IDEState *s) {
+ s->status |= BUSY_STAT;
+ s->bs = NULL;
+ /* This prevents all future commands from working. All of the
+ * asynchronous callbacks (and ide_set_irq, as a safety measure)
+ * check to see whether this has happened and bail if so.
+ */
+}
+
+/* Completion callback for an asynchronous flush: signal success via
+ * READY_STAT, or take the whole device offline on error (see the
+ * comment below for why no finer-grained reporting is possible). */
+static void ide_flush_cb(void *opaque, int ret)
+{
+ IDEState *s = opaque;
+
+ if (!s->bs) return; /* yikes */
+
+ if (ret) {
+ /* We are completely doomed. The IDE spec does not permit us
+ * to return an error from a flush except via a protocol which
+ * requires us to say where the error is and which
+ * contemplates the guest repeating the flush attempt to
+ * attempt flush the remaining data. We can't support that
+ * because f(data)sync (which is what the block drivers use
+ * eventually) doesn't report the necessary information or
+ * give us the necessary control. So we make the disk vanish.
+ */
+ ide_device_utterly_broken(s);
+ return;
+ }
+ else
+ s->status = READY_STAT;
+ ide_set_irq(s);
+}
+
static void ide_atapi_cmd_ok(IDEState *s)
{
s->error = 0;
if (!s->bs) return; /* yikes */
+ if (!s->bs) return; /* yikes */
+
if (ret < 0) {
ide_atapi_io_error(s, ret);
goto eot;
switch(s->io_buffer[0]) {
case GPCMD_TEST_UNIT_READY:
if (bdrv_is_inserted(s->bs)) {
+ if (s->is_cdrom && s->sense_key == SENSE_NOT_READY) {
+ ide_atapi_cmd_error(s, SENSE_UNIT_ATTENTION,
+ ASC_MEDIUM_MAY_HAVE_CHANGED);
+ break;
+ }
ide_atapi_cmd_ok(s);
} else {
ide_atapi_cmd_error(s, SENSE_NOT_READY,
if (!s->bs) return; /* yikes */
+ if (!s->bs) return; /* yikes */
+
/* XXX: send interrupt too */
bdrv_get_geometry(s->bs, &nb_sectors);
s->nb_sectors = nb_sectors;
--- /dev/null
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Jiang Yunhong <yunhong.jiang@intel.com>
+ *
+ * This file implements direct PCI assignment to a HVM guest
+ */
+
+#include "pt-msi.h"
+#include <sys/mman.h>
+
+/* MSI virtualization functions */
+#define PT_MSI_CTRL_WR_MASK_HI (0x1)
+#define PT_MSI_CTRL_WR_MASK_LO (0x8E)
+#define PT_MSI_DATA_WR_MASK (0x38)
+/* Initialise virtualised MSI state for a passed-through device.
+ * pos is the config-space offset of the MSI capability.  Returns 0 on
+ * success, -1 on a bad capability ID or allocation failure. */
+int pt_msi_init(struct pt_dev *dev, int pos)
+{
+ uint8_t id;
+ uint16_t flags;
+ struct pci_dev *pd = dev->pci_dev;
+ PCIDevice *d = (struct PCIDevice *)dev;
+
+ id = pci_read_byte(pd, pos + PCI_CAP_LIST_ID);
+
+ if ( id != PCI_CAP_ID_MSI )
+ {
+ PT_LOG("pt_msi_init: error id %x pos %x\n", id, pos);
+ return -1;
+ }
+
+ dev->msi = malloc(sizeof(struct pt_msi_info));
+ if ( !dev->msi )
+ {
+ PT_LOG("pt_msi_init: error allocation pt_msi_info\n");
+ return -1;
+ }
+ memset(dev->msi, 0, sizeof(struct pt_msi_info));
+
+ dev->msi->offset = pos;
+ dev->msi->size = 0xa;
+
+ /* Message Control is a 16-bit register; it must be read with a
+ * word access, or PCI_MSI_FLAGS_PVMASK (bit 8) and the rest of the
+ * high byte are lost (the old byte read made per-vector masking
+ * undetectable). */
+ flags = pci_read_word(pd, pos + PCI_MSI_FLAGS);
+ if ( flags & PCI_MSI_FLAGS_ENABLE )
+ {
+ PT_LOG("pt_msi_init: MSI enabled already, disable first\n");
+ pci_write_byte(pd, pos + PCI_MSI_FLAGS, flags & ~PCI_MSI_FLAGS_ENABLE);
+ }
+ dev->msi->flags |= (flags | MSI_FLAG_UNINIT);
+
+ /* Capability size grows with the optional 64-bit address and
+ * per-vector mask registers. */
+ if ( flags & PCI_MSI_FLAGS_64BIT )
+ dev->msi->size += 4;
+ if ( flags & PCI_MSI_FLAGS_PVMASK )
+ dev->msi->size += 10;
+
+ /* All register is 0 after reset, except first 4 byte */
+ *(uint32_t *)(&d->config[pos]) = pci_read_long(pd, pos);
+ d->config[pos + 2] &= PT_MSI_CTRL_WR_MASK_LO;
+ d->config[pos + 3] &= PT_MSI_CTRL_WR_MASK_HI;
+
+ return 0;
+}
+
+/*
+ * setup physical msi, but didn't enable it
+ */
+static int pt_msi_setup(struct pt_dev *dev)
+{
+ int pirq = -1;
+
+ /* Setup is only legal once, while the MSI state is still marked
+ * uninitialised by pt_msi_init(). */
+ if ( !(dev->msi->flags & MSI_FLAG_UNINIT) )
+ {
+ PT_LOG("setup physical after initialized?? \n");
+ return -1;
+ }
+
+ /* Ask Xen to map a pirq for this device's MSI; AUTO_ASSIGN lets
+ * the hypervisor choose the pirq number, returned through &pirq. */
+ if ( xc_physdev_map_pirq_msi(xc_handle, domid, MAP_PIRQ_TYPE_MSI,
+ AUTO_ASSIGN, &pirq,
+ dev->pci_dev->dev << 3 | dev->pci_dev->func,
+ dev->pci_dev->bus, 0, 1) )
+ {
+ PT_LOG("error map msi\n");
+ return -1;
+ }
+ dev->msi->pirq = pirq;
+ PT_LOG("msi mapped with pirq %x\n", pirq);
+
+ return 0;
+}
+
+/*
+ * caller should make sure mask is supported
+ */
+static uint32_t get_msi_gmask(struct pt_dev *d)
+{
+ struct PCIDevice *pd = (struct PCIDevice *)d;
+
+ /* Per the PCI spec the Mask Bits register is at capability offset
+ * 0x10 for 64-bit capable devices and 0x0c otherwise — matching
+ * the offsets pt_msi_write() dispatches on.  The two cases were
+ * previously swapped. */
+ if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+ return *(uint32_t *)(pd->config + d->msi->offset + 0x10);
+ else
+ return *(uint32_t *)(pd->config + d->msi->offset + 0xc);
+
+}
+
+/* Read the guest-visible MSI data register from the emulated config
+ * space; its offset depends on whether the address is 64-bit. */
+static uint16_t get_msi_gdata(struct pt_dev *d)
+{
+ struct PCIDevice *pd = (struct PCIDevice *)d;
+ int data_off;
+
+ /* The data register follows a 64-bit address field when present. */
+ data_off = (d->msi->flags & PCI_MSI_FLAGS_64BIT)
+ ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32;
+
+ return *(uint16_t *)(pd->config + d->msi->offset + data_off);
+}
+
+/* Assemble the guest-visible MSI address from the emulated config
+ * space: the low dword is always present, the high dword only for
+ * 64-bit capable devices. */
+static uint64_t get_msi_gaddr(struct pt_dev *d)
+{
+ struct PCIDevice *pd = (struct PCIDevice *)d;
+ uint64_t gaddr;
+
+ gaddr = *(uint32_t *)(pd->config + d->msi->offset + PCI_MSI_ADDRESS_LO);
+
+ if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+ gaddr |= (uint64_t)(*(uint32_t *)(pd->config + d->msi->offset
+ + PCI_MSI_ADDRESS_HI)) << 32;
+
+ return gaddr;
+}
+
+/* Read the low byte of the guest-visible MSI Message Control register
+ * (enough for the enable bit tested by callers). */
+static uint8_t get_msi_gctrl(struct pt_dev *d)
+{
+ struct PCIDevice *pd = (struct PCIDevice *)d;
+
+ return *(uint8_t *)(pd->config + d->msi->offset + PCI_MSI_FLAGS);
+}
+
+/* Pack the delivery attributes encoded in an MSI data/address pair
+ * (redirection hint, dest mode, dest id, delivery mode, trigger mode)
+ * into the flags word format expected by xc_domain_update_msi_irq. */
+static uint32_t __get_msi_gflags(uint32_t data, uint64_t addr)
+{
+ uint32_t result = 0;
+ int rh, dm, dest_id, deliv_mode, trig_mode;
+
+ rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
+ dm = (addr >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
+ dest_id = (addr >> MSI_TARGET_CPU_SHIFT) & 0xff;
+ deliv_mode = (data >> MSI_DATA_DELIVERY_SHIFT) & 0x7;
+ trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
+
+ result |= dest_id | (rh << GFLAGS_SHIFT_RH) | (dm << GFLAGS_SHIFT_DM) | \
+ (deliv_mode << GLFAGS_SHIFT_DELIV_MODE) |
+ (trig_mode << GLFAGS_SHIFT_TRG_MODE);
+
+ return result;
+}
+
+/* Flags derived from the guest's current MSI data/address registers. */
+static uint32_t get_msi_gflags(struct pt_dev *d)
+{
+ uint16_t data = get_msi_gdata(d);
+ uint64_t addr = get_msi_gaddr(d);
+
+ return __get_msi_gflags(data, addr);
+}
+
+/*
+ * This may be arch different
+ */
+static inline uint8_t get_msi_gvec(struct pt_dev *d)
+{
+ /* On x86 the vector is the low byte of the MSI data register. */
+ return get_msi_gdata(d) & 0xff;
+}
+
+/*
+ * Update msi mapping, usually called when MSI enabled,
+ * except the first time
+ */
+static int pt_msi_update(struct pt_dev *d)
+{
+ PT_LOG("now update msi with pirq %x gvec %x\n",
+ d->msi->pirq, get_msi_gvec(d));
+ /* Re-bind the already-mapped pirq to the guest's current vector
+ * and delivery attributes. */
+ return xc_domain_update_msi_irq(xc_handle, domid, get_msi_gvec(d),
+ d->msi->pirq, get_msi_gflags(d));
+}
+
+/* Set or clear the MSI enable bit on the physical device.
+ * Returns 0 on success, -1 if no physical device is attached. */
+static int pt_msi_enable(struct pt_dev *d, int enable)
+{
+ uint16_t ctrl;
+ struct pci_dev *pd = d->pci_dev;
+
+ if ( !pd )
+ return -1;
+
+ /* Read-modify-write the physical Message Control register. */
+ ctrl = pci_read_word(pd, d->msi->offset + PCI_MSI_FLAGS);
+
+ if ( enable )
+ ctrl |= PCI_MSI_FLAGS_ENABLE;
+ else
+ ctrl &= ~PCI_MSI_FLAGS_ENABLE;
+
+ pci_write_word(pd, d->msi->offset + PCI_MSI_FLAGS, ctrl);
+ return 0;
+}
+
+/* React to a guest write of the MSI control register: on the first
+ * enable, map and bind the physical MSI (pt_msi_setup/pt_msi_update)
+ * before enabling it; afterwards just mirror the enable bit. */
+static int pt_msi_control_update(struct pt_dev *d, uint16_t old_ctrl)
+{
+ uint16_t new_ctrl;
+ PCIDevice *pd = (PCIDevice *)d;
+
+ new_ctrl = get_msi_gctrl(d);
+
+ PT_LOG("old_ctrl %x new_Ctrl %x\n", old_ctrl, new_ctrl);
+
+ if ( new_ctrl & PCI_MSI_FLAGS_ENABLE )
+ {
+ if ( d->msi->flags & MSI_FLAG_UNINIT )
+ {
+ /* Init physical one */
+ PT_LOG("setup msi for dev %x\n", pd->devfn);
+ if ( pt_msi_setup(d) )
+ {
+ PT_LOG("pt_msi_setup error!!!\n");
+ return -1;
+ }
+ pt_msi_update(d);
+
+ d->msi->flags &= ~MSI_FLAG_UNINIT;
+ d->msi->flags |= PT_MSI_MAPPED;
+
+ /* Enable physical MSI only after bind */
+ pt_msi_enable(d, 1);
+ }
+ else if ( !(old_ctrl & PCI_MSI_FLAGS_ENABLE) )
+ pt_msi_enable(d, 1);
+ }
+ else if ( old_ctrl & PCI_MSI_FLAGS_ENABLE )
+ pt_msi_enable(d, 0);
+
+ /* Currently no support for multi-vector */
+ if ( (new_ctrl & PCI_MSI_FLAGS_QSIZE) != 0x0 )
+ PT_LOG("try to set more than 1 vector ctrl %x\n", new_ctrl);
+
+ return 0;
+}
+
+/* Re-bind the pirq if the guest changed the MSI data or address while
+ * MSI is enabled; no-op when neither register changed. */
+static int
+pt_msi_map_update(struct pt_dev *d, uint32_t old_data, uint64_t old_addr)
+{
+ uint32_t data;
+ uint64_t addr;
+
+ data = get_msi_gdata(d);
+ addr = get_msi_gaddr(d);
+
+ /* NOTE(review): %lx assumes a 64-bit long for the uint64_t
+ * arguments — confirm if this is ever built for 32-bit hosts. */
+ PT_LOG("old_data %x old_addr %lx data %x addr %lx\n",
+ old_data, old_addr, data, addr);
+
+ if ( data != old_data || addr != old_addr )
+ if ( get_msi_gctrl(d) & PCI_MSI_FLAGS_ENABLE )
+ pt_msi_update(d);
+
+ return 0;
+}
+
+/* Propagate a guest write of the per-vector Mask Bits register to the
+ * physical device.  Returns -1 if the device has no per-vector
+ * masking, 0 otherwise. */
+static int pt_msi_mask_update(struct pt_dev *d, uint32_t old_mask)
+{
+ struct pci_dev *pd = d->pci_dev;
+ uint32_t mask;
+ int offset;
+
+ if ( !(d->msi->flags & PCI_MSI_FLAGS_PVMASK) )
+ return -1;
+
+ mask = get_msi_gmask(d);
+
+ /* Per the PCI spec the Mask Bits register is at capability offset
+ * 0x10 for 64-bit capable devices and 0x0c otherwise (consistent
+ * with pt_msi_write's dispatch).  The two cases were previously
+ * swapped. */
+ if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+ offset = d->msi->offset + 0x10;
+ else
+ offset = d->msi->offset + 0xc;
+
+ if ( old_mask != mask )
+ pci_write_long(pd, offset, mask);
+
+ return 0;
+}
+
+/* Bitmask of MSI register groups touched by a config write; used to
+ * decide which physical updates to perform afterwards. */
+#define ACCESSED_DATA 0x2
+#define ACCESSED_MASK 0x4
+#define ACCESSED_ADDR 0x8
+#define ACCESSED_CTRL 0x10
+
+/* Intercept a guest config-space write overlapping the MSI capability.
+ * Emulates the write byte-by-byte into dev->config (honouring per-
+ * offset writability rules), then pushes any resulting data/address,
+ * mask, or control changes to the physical device.  Returns 1 if the
+ * write was handled here, 0 if it does not touch the capability. */
+int pt_msi_write(struct pt_dev *d, uint32_t addr, uint32_t val, uint32_t len)
+{
+ struct pci_dev *pd;
+ int i, cur = addr;
+ uint8_t value, flags = 0;
+ uint16_t old_ctrl = 0, old_data = 0;
+ uint32_t old_mask = 0;
+ uint64_t old_addr = 0;
+ PCIDevice *dev = (PCIDevice *)d;
+ int can_write = 1;
+
+ if ( !d || !d->msi )
+ return 0;
+
+ /* NOTE(review): the lower bound uses '<' where pt_msi_read uses
+ * '<='; a write ending exactly at the capability start is claimed
+ * as handled (returns 1) though no byte overlaps — confirm. */
+ if ( (addr >= (d->msi->offset + d->msi->size) ) ||
+ (addr + len) < d->msi->offset)
+ return 0;
+
+ PT_LOG("addr %x val %x len %x offset %x size %x\n",
+ addr, val, len, d->msi->offset, d->msi->size);
+
+ /* Snapshot guest-visible state so the update helpers below can
+ * detect what actually changed. */
+ pd = d->pci_dev;
+ old_ctrl = get_msi_gctrl(d);
+ old_addr = get_msi_gaddr(d);
+ old_data = get_msi_gdata(d);
+
+ if ( d->msi->flags & PCI_MSI_FLAGS_PVMASK )
+ old_mask = get_msi_gmask(d);
+
+ /* Walk the written bytes; 'off' is the offset within the MSI
+ * capability structure. */
+ for ( i = 0; i < len; i++, cur++ )
+ {
+ int off;
+ uint8_t orig_value;
+
+ if ( cur < d->msi->offset )
+ continue;
+ else if ( cur >= (d->msi->offset + d->msi->size) )
+ break;
+
+ off = cur - d->msi->offset;
+ value = (val >> (i * 8)) & 0xff;
+
+ switch ( off )
+ {
+ case 0x0 ... 0x1:
+ /* Capability ID / next pointer: read-only. */
+ can_write = 0;
+ break;
+ case 0x2:
+ case 0x3:
+ /* Message Control: merge guest-writable bits with the
+ * physical register's read-only bits. */
+ flags |= ACCESSED_CTRL;
+
+ orig_value = pci_read_byte(pd, d->msi->offset + off);
+
+ orig_value &= (off == 2) ? PT_MSI_CTRL_WR_MASK_LO:
+ PT_MSI_CTRL_WR_MASK_HI;
+
+ orig_value |= value & ( (off == 2) ? ~PT_MSI_CTRL_WR_MASK_LO:
+ ~PT_MSI_CTRL_WR_MASK_HI);
+ value = orig_value;
+ break;
+ case 0x4 ... 0x7:
+ flags |= ACCESSED_ADDR;
+ /* bit 4 ~ 11 is reserved for MSI in x86 */
+ if ( off == 0x4 )
+ value &= 0x0f;
+ if ( off == 0x5 )
+ value &= 0xf0;
+ break;
+ case 0x8 ... 0xb:
+ /* Either the address high dword (64-bit capable) or the
+ * data register plus padding (32-bit). */
+ if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+ {
+ /* Up 32bit is reserved in x86 */
+ flags |= ACCESSED_ADDR;
+ if ( value )
+ PT_LOG("Write up32 addr with %x \n", value);
+ }
+ else
+ {
+ if ( off == 0xa || off == 0xb )
+ can_write = 0;
+ else
+ flags |= ACCESSED_DATA;
+ if ( off == 0x9 )
+ value &= ~PT_MSI_DATA_WR_MASK;
+ }
+ break;
+ case 0xc ... 0xf:
+ /* Either the data register (64-bit capable) or the mask
+ * register (32-bit with per-vector masking). */
+ if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+ {
+ if ( off == 0xe || off == 0xf )
+ can_write = 0;
+ else
+ {
+ flags |= ACCESSED_DATA;
+ if (off == 0xd)
+ value &= ~PT_MSI_DATA_WR_MASK;
+ }
+ }
+ else
+ {
+ if ( d->msi->flags & PCI_MSI_FLAGS_PVMASK )
+ flags |= ACCESSED_MASK;
+ else
+ PT_LOG("why comes to MASK without mask support??\n");
+ }
+ break;
+ case 0x10 ... 0x13:
+ /* Mask register for 64-bit capable devices; read-only
+ * (pending bits) otherwise. */
+ if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+ {
+ if ( d->msi->flags & PCI_MSI_FLAGS_PVMASK )
+ flags |= ACCESSED_MASK;
+ else
+ PT_LOG("why comes to MASK without mask support??\n");
+ }
+ else
+ can_write = 0;
+ break;
+ case 0x14 ... 0x18:
+ /* Pending bits: read-only. */
+ can_write = 0;
+ break;
+ default:
+ PT_LOG("Non MSI register!!!\n");
+ break;
+ }
+
+ if ( can_write )
+ dev->config[cur] = value;
+ }
+
+ if ( flags & ACCESSED_DATA || flags & ACCESSED_ADDR )
+ pt_msi_map_update(d, old_data, old_addr);
+
+ if ( flags & ACCESSED_MASK )
+ pt_msi_mask_update(d, old_mask);
+
+ /* This will enable physical one, do it in last step */
+ if ( flags & ACCESSED_CTRL )
+ pt_msi_control_update(d, old_ctrl);
+
+ return 1;
+}
+
+/* Intercept a guest config-space read overlapping the MSI capability.
+ * Clips [addr, addr+len) to the capability range, copies the emulated
+ * bytes from pd->config into *val at the right byte positions, and
+ * returns the number of bytes served (0 when there is no overlap). */
+int pt_msi_read(struct pt_dev *d, int addr, int len, uint32_t *val)
+{
+ int e_addr = addr, e_len = len, offset = 0, i;
+ uint8_t e_val = 0;
+ PCIDevice *pd = (PCIDevice *)d;
+
+ if ( !d || !d->msi )
+ return 0;
+
+ /* NOTE(review): upper bound uses '>' where pt_msi_write uses '>=';
+ * addr == offset+size yields e_len == 0 (harmless) — confirm. */
+ if ( (addr > (d->msi->offset + d->msi->size) ) ||
+ (addr + len) <= d->msi->offset )
+ return 0;
+
+ PT_LOG("pt_msi_read addr %x len %x val %x offset %x size %x\n",
+ addr, len, *val, d->msi->offset, d->msi->size);
+
+ /* Clip the tail of the access to the end of the capability. */
+ if ( (addr + len ) > (d->msi->offset + d->msi->size) )
+ e_len -= addr + len - d->msi->offset - d->msi->size;
+
+ /* Clip the head; 'offset' is where the served bytes start within
+ * the caller's value. */
+ if ( addr < d->msi->offset )
+ {
+ e_addr = d->msi->offset;
+ offset = d->msi->offset - addr;
+ e_len -= offset;
+ }
+
+ for ( i = 0; i < e_len; i++ )
+ {
+ e_val = *(uint8_t *)(&pd->config[e_addr] + i);
+ *val &= ~(0xff << ( (offset + i) * 8));
+ *val |= (e_val << ( (offset + i) * 8));
+ }
+
+ return e_len;
+}
+
+/* MSI-X virtualization functions */
+#define PT_MSIX_CTRL_WR_MASK_HI (0xC0)
+/* Write the Vector Control dword (offset 12 of the 16-byte entry) in
+ * the mapped physical MSI-X table to mask/unmask one entry. */
+static void mask_physical_msix_entry(struct pt_dev *dev, int entry_nr, int mask)
+{
+ void *phys_off;
+
+ phys_off = dev->msix->phys_iomem_base + 16 * entry_nr + 12;
+ *(uint32_t *)phys_off = mask;
+}
+
+/* Push one guest MSI-X table entry to Xen: map a pirq for the entry on
+ * first use, then bind it to the entry's vector/flags.  No-op unless
+ * the entry was modified (entry->flags set by pci_msix_writel). */
+static int pt_msix_update_one(struct pt_dev *dev, int entry_nr)
+{
+ struct msix_entry_info *entry = &dev->msix->msix_entry[entry_nr];
+ int pirq = entry->pirq;
+ int gvec = entry->io_mem[2] & 0xff;
+ uint64_t gaddr = *(uint64_t *)&entry->io_mem[0];
+ uint32_t gflags = __get_msi_gflags(entry->io_mem[2], gaddr);
+ int ret;
+
+ if ( !entry->flags )
+ return 0;
+
+ /* Check if this entry is already mapped */
+ if ( entry->pirq == -1 )
+ {
+ ret = xc_physdev_map_pirq_msi(xc_handle, domid, MAP_PIRQ_TYPE_MSI,
+ AUTO_ASSIGN, &pirq,
+ dev->pci_dev->dev << 3 | dev->pci_dev->func,
+ dev->pci_dev->bus, entry_nr, 0);
+ if ( ret )
+ {
+ PT_LOG("error map msix entry %x\n", entry_nr);
+ return ret;
+ }
+ entry->pirq = pirq;
+ }
+
+ PT_LOG("now update msix entry %x with pirq %x gvec %x\n",
+ entry_nr, pirq, gvec);
+
+ ret = xc_domain_update_msi_irq(xc_handle, domid, gvec, pirq, gflags);
+ if ( ret )
+ {
+ PT_LOG("error update msix irq info for entry %d\n", entry_nr);
+ return ret;
+ }
+
+ /* Mark the entry clean until the guest writes it again. */
+ entry->flags = 0;
+
+ return 0;
+}
+
+/* Push every dirty MSI-X table entry to Xen. */
+static int pt_msix_update(struct pt_dev *dev)
+{
+ struct pt_msix_info *msix = dev->msix;
+ int i;
+
+ for ( i = 0; i < msix->total_entries; i++ )
+ {
+ pt_msix_update_one(dev, i);
+ }
+
+ return 0;
+}
+
+/* Byte/word store handler: the virtual MSI-X table only accepts
+ * aligned dword accesses, so just log and drop the write.
+ * NOTE(review): the backslash-continued string literal embeds the
+ * next line's leading whitespace in the log message — confirm
+ * intended. */
+static void pci_msix_invalid_write(void *opaque, target_phys_addr_t addr,
+ uint32_t val)
+{
+ PT_LOG("invalid write to MSI-X table, \
+ only dword access is allowed.\n");
+}
+
+/* Dword store into the virtual MSI-X table.  Caches the value in the
+ * entry's io_mem[]; writes to the Vector Control dword (offset == 3)
+ * additionally sync the entry to Xen (when unmasking) and propagate
+ * the mask bit to the physical table. */
+static void pci_msix_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+ struct pt_dev *dev = (struct pt_dev *)opaque;
+ struct pt_msix_info *msix = dev->msix;
+ struct msix_entry_info *entry;
+ int entry_nr, offset;
+
+ if ( addr % 4 )
+ {
+ PT_LOG("unaligned dword access to MSI-X table, addr %016lx\n",
+ addr);
+ return;
+ }
+
+ /* Entries are 16 bytes; offset indexes the four dwords within. */
+ entry_nr = (addr - msix->mmio_base_addr) / 16;
+ entry = &msix->msix_entry[entry_nr];
+ offset = ((addr - msix->mmio_base_addr) % 16) / 4;
+
+ /* Refuse to change addr/data/ctrl of an unmasked, enabled entry. */
+ if ( offset != 3 && msix->enabled && entry->io_mem[3] & 0x1 )
+ {
+ PT_LOG("can not update msix entry %d since MSI-X is already \
+ function now.\n", entry_nr);
+ return;
+ }
+
+ if ( offset != 3 && entry->io_mem[offset] != val )
+ entry->flags = 1;
+ entry->io_mem[offset] = val;
+
+ if ( offset == 3 )
+ {
+ if ( !(val & 0x1) )
+ pt_msix_update_one(dev, entry_nr);
+ mask_physical_msix_entry(dev, entry_nr, entry->io_mem[3] & 0x1);
+ }
+}
+
+/* MMIO write dispatch for the virtual table: byte, word, dword. */
+static CPUWriteMemoryFunc *pci_msix_write[] = {
+ pci_msix_invalid_write,
+ pci_msix_invalid_write,
+ pci_msix_writel
+};
+
+/* Byte/word load handler: only dword reads are supported. */
+static uint32_t pci_msix_invalid_read(void *opaque, target_phys_addr_t addr)
+{
+ PT_LOG("invalid read to MSI-X table, \
+ only dword access is allowed.\n");
+ return 0;
+}
+
+/* Dword load from the virtual MSI-X table: served entirely from the
+ * cached io_mem[] copy, never from the physical table. */
+static uint32_t pci_msix_readl(void *opaque, target_phys_addr_t addr)
+{
+ struct pt_dev *dev = (struct pt_dev *)opaque;
+ struct pt_msix_info *msix = dev->msix;
+ int entry_nr, offset;
+
+ if ( addr % 4 )
+ {
+ PT_LOG("unaligned dword access to MSI-X table, addr %016lx\n",
+ addr);
+ return 0;
+ }
+
+ entry_nr = (addr - msix->mmio_base_addr) / 16;
+ offset = ((addr - msix->mmio_base_addr) % 16) / 4;
+
+ return msix->msix_entry[entry_nr].io_mem[offset];
+}
+
+/* MMIO read dispatch for the virtual table: byte, word, dword. */
+static CPUReadMemoryFunc *pci_msix_read[] = {
+ pci_msix_invalid_read,
+ pci_msix_invalid_read,
+ pci_msix_readl
+};
+
+/* If this BAR hosts the MSI-X table, map the physical table pages into
+ * the guest at the (page-rounded) table address.  Returns 0 when the
+ * BAR is not the MSI-X one. */
+int add_msix_mapping(struct pt_dev *dev, int bar_index)
+{
+ if ( !(dev->msix && dev->msix->bar_index == bar_index) )
+ return 0;
+
+ return xc_domain_memory_mapping(xc_handle, domid,
+ dev->msix->mmio_base_addr >> XC_PAGE_SHIFT,
+ (dev->bases[bar_index].access.maddr
+ + dev->msix->table_off) >> XC_PAGE_SHIFT,
+ (dev->msix->total_entries * 16
+ + XC_PAGE_SIZE -1) >> XC_PAGE_SHIFT,
+ DPCI_ADD_MAPPING);
+}
+
+/* Undo add_msix_mapping for this BAR: recompute the table's guest
+ * address from the BAR base, re-register the virtual (trapping) MMIO
+ * region over it, and remove the direct physical mapping. */
+int remove_msix_mapping(struct pt_dev *dev, int bar_index)
+{
+ if ( !(dev->msix && dev->msix->bar_index == bar_index) )
+ return 0;
+
+ dev->msix->mmio_base_addr = dev->bases[bar_index].e_physbase
+ + dev->msix->table_off;
+
+ cpu_register_physical_memory(dev->msix->mmio_base_addr,
+ dev->msix->total_entries * 16,
+ dev->msix->mmio_index);
+
+ return xc_domain_memory_mapping(xc_handle, domid,
+ dev->msix->mmio_base_addr >> XC_PAGE_SHIFT,
+ (dev->bases[bar_index].access.maddr
+ + dev->msix->table_off) >> XC_PAGE_SHIFT,
+ (dev->msix->total_entries * 16
+ + XC_PAGE_SIZE -1) >> XC_PAGE_SHIFT,
+ DPCI_REMOVE_MAPPING);
+}
+
+/* Initialise virtualised MSI-X state for a passed-through device:
+ * allocate per-entry tracking, register the trapping MMIO region for
+ * the virtual table, and map the physical table via /dev/mem.
+ * pos is the config-space offset of the MSI-X capability.
+ * Returns 0 on success, -1 on error. */
+int pt_msix_init(struct pt_dev *dev, int pos)
+{
+ uint8_t id;
+ uint16_t flags, control;
+ int i, total_entries, table_off, bar_index;
+ uint64_t bar_base;
+ struct pci_dev *pd = dev->pci_dev;
+
+ id = pci_read_byte(pd, pos + PCI_CAP_LIST_ID);
+
+ if ( id != PCI_CAP_ID_MSIX )
+ {
+ PT_LOG("error id %x pos %x\n", id, pos);
+ return -1;
+ }
+
+ /* Table Size field is encoded as N-1. */
+ control = pci_read_word(pd, pos + 2);
+ total_entries = control & 0x7ff;
+ total_entries += 1;
+
+ dev->msix = malloc(sizeof(struct pt_msix_info)
+ + total_entries*sizeof(struct msix_entry_info));
+ if ( !dev->msix )
+ {
+ PT_LOG("error allocation pt_msix_info\n");
+ return -1;
+ }
+ memset(dev->msix, 0, sizeof(struct pt_msix_info)
+ + total_entries*sizeof(struct msix_entry_info));
+ dev->msix->total_entries = total_entries;
+ dev->msix->offset = pos;
+ /* pirq == -1 marks an entry as not yet mapped through Xen. */
+ for ( i = 0; i < total_entries; i++ )
+ dev->msix->msix_entry[i].pirq = -1;
+
+ dev->msix->mmio_index =
+ cpu_register_io_memory(0, pci_msix_read, pci_msix_write, dev);
+
+ /* If left enabled by firmware or a previous owner, disable before
+ * handing the device to the guest. */
+ flags = pci_read_word(pd, pos + PCI_MSI_FLAGS);
+ if ( flags & PCI_MSIX_ENABLE )
+ {
+ PT_LOG("MSIX enabled already, disable first\n");
+ pci_write_word(pd, pos + PCI_MSI_FLAGS, flags & ~PCI_MSIX_ENABLE);
+ *(uint16_t *)&dev->dev.config[pos + PCI_MSI_FLAGS]
+ = flags & ~(PCI_MSIX_ENABLE | PCI_MSIX_MASK);
+ }
+
+ /* Table offset register: low bits select the BAR, the rest is the
+ * byte offset of the table within that BAR. */
+ table_off = pci_read_long(pd, pos + PCI_MSIX_TABLE);
+ bar_index = dev->msix->bar_index = table_off & PCI_MSIX_BIR;
+ table_off &= ~PCI_MSIX_BIR; /* was the redundant 'table_off &= table_off & ~PCI_MSIX_BIR;' */
+ bar_base = pci_read_long(pd, 0x10 + 4 * bar_index);
+ /* 64-bit memory BAR: fold in the high dword from the next slot. */
+ if ( (bar_base & 0x6) == 0x4 )
+ {
+ bar_base &= ~0xf;
+ bar_base += (uint64_t)pci_read_long(pd, 0x10 + 4 * (bar_index + 1)) << 32;
+ }
+ PT_LOG("get MSI-X table bar base %lx\n", bar_base);
+
+ dev->msix->fd = open("/dev/mem", O_RDWR);
+ if ( dev->msix->fd == -1 )
+ {
+ PT_LOG("error opening /dev/mem for MSI-X table\n");
+ free(dev->msix);
+ dev->msix = NULL;
+ return -1;
+ }
+ dev->msix->phys_iomem_base = mmap(0, total_entries * 16,
+ PROT_WRITE | PROT_READ, MAP_SHARED | MAP_LOCKED,
+ dev->msix->fd, bar_base + table_off);
+ if ( dev->msix->phys_iomem_base == MAP_FAILED )
+ {
+ PT_LOG("error mapping physical MSI-X table\n");
+ close(dev->msix->fd);
+ free(dev->msix);
+ dev->msix = NULL;
+ return -1;
+ }
+ PT_LOG("mapping physical MSI-X table to %lx\n",
+ (unsigned long)dev->msix->phys_iomem_base);
+ return 0;
+}
+
+/* Set or clear the MSI-X enable bit on the physical device and mirror
+ * the state in msix->enabled.  Returns -1 if no device is attached. */
+static int pt_msix_enable(struct pt_dev *d, int enable)
+{
+ uint16_t ctrl;
+ struct pci_dev *pd = d->pci_dev;
+
+ if ( !pd )
+ return -1;
+
+ ctrl = pci_read_word(pd, d->msix->offset + PCI_MSI_FLAGS);
+ if ( enable )
+ ctrl |= PCI_MSIX_ENABLE;
+ else
+ ctrl &= ~PCI_MSIX_ENABLE;
+ pci_write_word(pd, d->msix->offset + PCI_MSI_FLAGS, ctrl);
+ d->msix->enabled = !!enable;
+
+ return 0;
+}
+
+/* Set or clear the MSI-X function-mask bit on the physical device. */
+static int pt_msix_func_mask(struct pt_dev *d, int mask)
+{
+ uint16_t ctrl;
+ struct pci_dev *pd = d->pci_dev;
+
+ if ( !pd )
+ return -1;
+
+ ctrl = pci_read_word(pd, d->msix->offset + PCI_MSI_FLAGS);
+
+ if ( mask )
+ ctrl |= PCI_MSIX_MASK;
+ else
+ ctrl &= ~PCI_MSIX_MASK;
+
+ pci_write_word(pd, d->msix->offset + PCI_MSI_FLAGS, ctrl);
+ return 0;
+}
+
+/* React to a guest write of the MSI-X control word: sync dirty table
+ * entries to Xen when enabling unmasked, then mirror the mask and
+ * enable bits to the physical device. */
+static int pt_msix_control_update(struct pt_dev *d)
+{
+ PCIDevice *pd = (PCIDevice *)d;
+ uint16_t ctrl = *(uint16_t *)(&pd->config[d->msix->offset + 2]);
+
+ if ( ctrl & PCI_MSIX_ENABLE && !(ctrl & PCI_MSIX_MASK ) )
+ pt_msix_update(d);
+
+ pt_msix_func_mask(d, ctrl & PCI_MSIX_MASK);
+ pt_msix_enable(d, ctrl & PCI_MSIX_ENABLE);
+
+ return 0;
+}
+
+/* Intercept a guest config write overlapping the first 4 bytes of the
+ * MSI-X capability.  Only the high byte of the control word (offset 3)
+ * is writable, filtered through PT_MSIX_CTRL_WR_MASK_HI; a change
+ * there triggers pt_msix_control_update.  Returns 1 if that byte was
+ * written, 0 otherwise. */
+int pt_msix_write(struct pt_dev *d, uint32_t addr, uint32_t val, uint32_t len)
+{
+ struct pci_dev *pd;
+ int i, cur = addr;
+ uint8_t value;
+ PCIDevice *dev = (PCIDevice *)d;
+
+ if ( !d || !d->msix )
+ return 0;
+
+ if ( (addr >= (d->msix->offset + 4) ) ||
+ (addr + len) < d->msix->offset)
+ return 0;
+
+ PT_LOG("addr %x val %x len %x offset %x\n",
+ addr, val, len, d->msix->offset);
+
+ pd = d->pci_dev;
+
+ /* Scan the written bytes for the one writable control byte. */
+ for ( i = 0; i < len; i++, cur++ )
+ {
+ uint8_t orig_value;
+
+ if ( cur != d->msix->offset + 3 )
+ continue;
+
+ value = (val >> (i * 8)) & 0xff;
+
+ /* Merge guest-writable bits with the physical read-only bits. */
+ orig_value = pci_read_byte(pd, cur);
+ value = (orig_value & ~PT_MSIX_CTRL_WR_MASK_HI) |
+ (value & PT_MSIX_CTRL_WR_MASK_HI);
+ dev->config[cur] = value;
+ pt_msix_control_update(d);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Intercept a guest config read overlapping the first 4 bytes of the
+ * MSI-X capability.  Clips the access to those bytes, copies them from
+ * the emulated config space into *val, and returns the number of
+ * bytes served (0 when there is no overlap). */
+int pt_msix_read(struct pt_dev *d, int addr, int len, uint32_t *val)
+{
+ int e_addr = addr, e_len = len, offset = 0, i;
+ uint8_t e_val = 0;
+ PCIDevice *pd = (PCIDevice *)d;
+
+ if ( !d || !d->msix )
+ return 0;
+
+ if ( (addr > (d->msix->offset + 3) ) ||
+ (addr + len) <= d->msix->offset )
+ return 0;
+
+ /* Clip the tail of the access to the end of the handled window. */
+ if ( (addr + len ) > (d->msix->offset + 3) )
+ e_len -= addr + len - d->msix->offset - 3;
+
+ /* Clip the head; 'offset' is where served bytes land in *val. */
+ if ( addr < d->msix->offset )
+ {
+ e_addr = d->msix->offset;
+ offset = d->msix->offset - addr;
+ e_len -= offset;
+ }
+
+ for ( i = 0; i < e_len; i++ )
+ {
+ e_val = *(uint8_t *)(&pd->config[e_addr] + i);
+ *val &= ~(0xff << ( (offset + i) * 8));
+ *val |= (e_val << ( (offset + i) * 8));
+ }
+
+ PT_LOG("addr %x len %x val %x offset %x\n",
+ addr, len, *val, d->msix->offset);
+
+ return e_len;
+}
+
--- /dev/null
+#ifndef _PT_MSI_H
+#define _PT_MSI_H
+
+#include "vl.h"
+#include "pci/header.h"
+#include "pci/pci.h"
+#include "pass-through.h"
+
+#define MSI_FLAG_UNINIT 0x1000
+#define PT_MSI_MAPPED 0x2000
+
+#define MSI_DATA_VECTOR_SHIFT 0
+#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
+
+#define MSI_DATA_DELIVERY_SHIFT 8
+#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT 14
+#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
+#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT 15
+#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for APIC-based bus address
+ */
+
+#define MSI_ADDR_HEADER 0xfee00000
+#define MSI_TARGET_CPU_SHIFT 12
+
+#define MSI_ADDR_DESTID_MASK 0xfff0000f
+#define MSI_ADDR_DESTID_CPU(cpu) ((cpu) << MSI_TARGET_CPU_SHIFT)
+
+#define MSI_ADDR_DESTMODE_SHIFT 2
+#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT)
+#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT 3
+#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+#define PCI_MSI_FLAGS_PVMASK 0x100
+
+#define AUTO_ASSIGN -1
+
+/* shift count for gflags */
+#define GFLAGS_SHIFT_DEST_ID 0
+#define GFLAGS_SHIFT_RH 8
+#define GFLAGS_SHIFT_DM 9
+#define GLFAGS_SHIFT_DELIV_MODE 12
+#define GLFAGS_SHIFT_TRG_MODE 15
+
+int
+pt_msi_init(struct pt_dev *dev, int pos);
+
+int
+pt_msi_write(struct pt_dev *d, uint32_t addr, uint32_t val, uint32_t len);
+
+int
+pt_msi_read(struct pt_dev *d, int addr, int len, uint32_t *val);
+
+int
+remove_msix_mapping(struct pt_dev *dev, int bar_index);
+
+int
+add_msix_mapping(struct pt_dev *dev, int bar_index);
+
+int
+pt_msix_init(struct pt_dev *dev, int pos);
+
+int
+pt_msix_write(struct pt_dev *d, uint32_t addr, uint32_t val, uint32_t len);
+
+int
+pt_msix_read(struct pt_dev *d, int addr, int len, uint32_t *val);
+
+#endif
static void rtl8139_CpCmd_write(RTL8139State *s, uint32_t val)
{
+ int i;
+
val &= 0xffff;
DEBUG_PRINT(("RTL8139C+ command register write(w) val=0x%04x\n", val));
/* mask unwriteable bits */
val = SET_MASKED(val, 0xff84, s->CpCmd);
+ if ( (s->CpCmd & CPlusTxEnb) &&
+ !(val & CPlusTxEnb) )
+ {
+ /* Reset TX status when the transmitter drops from C+ to
+ non-C+ mode. Windows has a habit of turning off C+ and
+ then waiting for the TX requests to clear as part of shut
+ down, and you get stuck forever if there are old DTCRs in
+ the registers. */
+ for (i = 0; i < 4; i++)
+ {
+ s->TxStatus[i] = TxHostOwns;
+ }
+ }
+
s->CpCmd = val;
}
{
DEBUG_PRINT(("RTL8139: +++ cannot transmit from descriptor %d: transmitter disabled\n",
descriptor));
+ s->TxStatus[descriptor] = TxAborted | TxHostOwns;
return 0;
}
{
DEBUG_PRINT(("RTL8139: +++ cannot transmit from descriptor %d: owned by host (%08x)\n",
descriptor, s->TxStatus[descriptor]));
+ s->TxStatus[descriptor] = TxAborted | TxHostOwns;
return 0;
}
uint32_t line_compare; \
uint32_t start_addr; \
uint32_t plane_updated; \
+ uint32_t last_line_offset; \
uint8_t last_cw, last_ch; \
uint32_t last_width, last_height; /* in chars or pixels */ \
uint32_t last_scr_width, last_scr_height; /* in pixels */ \
#define BTN_LEFT 0x110 /* from <linux/input.h> */
#endif
-// FIXME defend against malicious frontend?
-
struct xenfb;
struct xenfb_device {
int depth; /* colour depth of guest framebuffer */
int width; /* pixel width of guest framebuffer */
int height; /* pixel height of guest framebuffer */
+ int offset; /* offset of the framebuffer */
int abs_pointer_wanted; /* Whether guest supports absolute pointer */
int button_state; /* Last seen pointer button state */
+ int refresh_period; /* The refresh period we have advised */
char protocol[64]; /* frontend protocol */
};
free(xenfb);
}
+static int xenfb_configure_fb(struct xenfb *xenfb, size_t fb_len_lim,
+ int width, int height, int depth,
+ size_t fb_len, int offset, int row_stride)
+{
+ size_t mfn_sz = sizeof(*((struct xenfb_page *)0)->pd);
+ size_t pd_len = sizeof(((struct xenfb_page *)0)->pd) / mfn_sz;
+ size_t fb_pages = pd_len * XC_PAGE_SIZE / mfn_sz;
+ size_t fb_len_max = fb_pages * XC_PAGE_SIZE;
+ int max_width, max_height;
+
+ if (fb_len_lim > fb_len_max) {
+ fprintf(stderr,
+ "FB: fb size limit %zu exceeds %zu, corrected\n",
+ fb_len_lim, fb_len_max);
+ fb_len_lim = fb_len_max;
+ }
+ if (fb_len > fb_len_lim) {
+ fprintf(stderr,
+ "FB: frontend fb size %zu limited to %zu\n",
+ fb_len, fb_len_lim);
+ fb_len = fb_len_lim;
+ }
+ if (depth != 8 && depth != 16 && depth != 24 && depth != 32) {
+ fprintf(stderr,
+ "FB: can't handle frontend fb depth %d\n",
+ depth);
+ return -1;
+ }
+ if (row_stride < 0 || row_stride > fb_len) {
+ fprintf(stderr,
+ "FB: invalid frontend stride %d\n", row_stride);
+ return -1;
+ }
+ max_width = row_stride / (depth / 8);
+ if (width < 0 || width > max_width) {
+ fprintf(stderr,
+ "FB: invalid frontend width %d limited to %d\n",
+ width, max_width);
+ width = max_width;
+ }
+ if (offset < 0 || offset >= fb_len) {
+ fprintf(stderr,
+ "FB: invalid frontend offset %d (max %zu)\n",
+ offset, fb_len - 1);
+ return -1;
+ }
+ max_height = (fb_len - offset) / row_stride;
+ if (height < 0 || height > max_height) {
+ fprintf(stderr,
+ "FB: invalid frontend height %d limited to %d\n",
+ height, max_height);
+ height = max_height;
+ }
+ xenfb->fb_len = fb_len;
+ xenfb->row_stride = row_stride;
+ xenfb->depth = depth;
+ xenfb->width = width;
+ xenfb->height = height;
+ xenfb->offset = offset;
+ fprintf(stderr, "Framebuffer %dx%dx%d offset %d stride %d\n",
+ width, height, depth, offset, row_stride);
+ return 0;
+}
static void xenfb_on_fb_event(struct xenfb *xenfb)
{
|| h != event->update.height) {
fprintf(stderr, "%s bogus update clipped\n",
xenfb->fb.nodename);
- break;
}
xenfb_guest_copy(xenfb, x, y, w, h);
break;
+ case XENFB_TYPE_RESIZE:
+ if (xenfb_configure_fb(xenfb, xenfb->fb_len,
+ event->resize.width,
+ event->resize.height,
+ event->resize.depth,
+ xenfb->fb_len,
+ event->resize.offset,
+ event->resize.stride) < 0)
+ break;
+ dpy_colourdepth(xenfb->ds, xenfb->depth);
+ dpy_resize(xenfb->ds, xenfb->width, xenfb->height, xenfb->row_stride);
+ if (xenfb->ds->shared_buf)
+ dpy_setdata(xenfb->ds, xenfb->pixels + xenfb->offset);
+ xenfb_invalidate(xenfb);
+ break;
}
}
xen_mb(); /* ensure we're done with ring contents */
xc_evtchn_notify(xenfb->evt_xch, xenfb->fb.port);
}
+static int xenfb_queue_full(struct xenfb *xenfb)
+{
+ struct xenfb_page *page = xenfb->fb.page;
+ uint32_t cons, prod;
+
+ prod = page->in_prod;
+ cons = page->in_cons;
+ return prod - cons == XENFB_IN_RING_LEN;
+}
+
+static void xenfb_send_event(struct xenfb *xenfb, union xenfb_in_event *event)
+{
+ uint32_t prod;
+ struct xenfb_page *page = xenfb->fb.page;
+
+ prod = page->in_prod;
+ /* caller ensures !xenfb_queue_full() */
+ xen_mb(); /* ensure ring space available */
+ XENFB_IN_RING_REF(page, prod) = *event;
+ xen_wmb(); /* ensure ring contents visible */
+ page->in_prod = prod + 1;
+
+ xc_evtchn_notify(xenfb->evt_xch, xenfb->fb.port);
+}
+
+static void xenfb_send_refresh_period(struct xenfb *xenfb, int period)
+{
+ union xenfb_in_event event;
+
+ memset(&event, 0, sizeof(event));
+ event.type = XENFB_TYPE_REFRESH_PERIOD;
+ event.refresh_period.period = period;
+ xenfb_send_event(xenfb, &event);
+}
+
static void xenfb_on_kbd_event(struct xenfb *xenfb)
{
struct xenkbd_page *page = xenfb->kbd.page;
static int xenfb_read_frontend_fb_config(struct xenfb *xenfb) {
struct xenfb_page *fb_page;
int val;
+ int videoram;
if (xenfb_xs_scanf1(xenfb->xsh, xenfb->fb.otherend, "feature-update",
"%d", &val) < 0)
xenfb->protocol) < 0)
xenfb->protocol[0] = '\0';
xenfb_xs_printf(xenfb->xsh, xenfb->fb.nodename, "request-update", "1");
+ xenfb->refresh_period = -1;
+
+ if (xenfb_xs_scanf1(xenfb->xsh, xenfb->fb.nodename, "videoram", "%d",
+ &videoram) < 0)
+ videoram = 0;
+ fb_page = xenfb->fb.page;
+ if (xenfb_configure_fb(xenfb, videoram * 1024 * 1024U,
+ fb_page->width, fb_page->height, fb_page->depth,
+ fb_page->mem_length, 0, fb_page->line_length)
+ < 0) {
+ errno = EINVAL;
+ return -1;
+ }
- /* TODO check for permitted ranges */
- fb_page = xenfb->fb.page;
- xenfb->depth = fb_page->depth;
- xenfb->width = fb_page->width;
- xenfb->height = fb_page->height;
- /* TODO check for consistency with the above */
- xenfb->fb_len = fb_page->mem_length;
- xenfb->row_stride = fb_page->line_length;
- fprintf(stderr, "Framebuffer depth %d width %d height %d line %d\n",
- fb_page->depth, fb_page->width, fb_page->height, fb_page->line_length);
if (xenfb_map_fb(xenfb, xenfb->fb.otherend_id) < 0)
return -1;
+ /* Indicate we have the frame buffer resize feature */
+ xenfb_xs_printf(xenfb->xsh, xenfb->fb.nodename, "feature-resize", "1");
+
+ /* Tell kbd pointer the screen geometry */
+ xenfb_xs_printf(xenfb->xsh, xenfb->kbd.nodename, "width", "%d", xenfb->width);
+ xenfb_xs_printf(xenfb->xsh, xenfb->kbd.nodename, "height", "%d", xenfb->height);
+
if (xenfb_switch_state(&xenfb->fb, XenbusStateConnected))
return -1;
if (xenfb_switch_state(&xenfb->kbd, XenbusStateConnected))
#define BLT(SRC_T,DST_T,RSB,GSB,BSB,RDB,GDB,BDB) \
for (line = y ; line < (y+h) ; line++) { \
SRC_T *src = (SRC_T *)(xenfb->pixels \
+ + xenfb->offset \
+ (line * xenfb->row_stride) \
+ (x * xenfb->depth / 8)); \
DST_T *dst = (DST_T *)(xenfb->ds->data \
if (xenfb->depth == xenfb->ds->depth) { /* Perfect match can use fast path */
for (line = y ; line < (y+h) ; line++) {
memcpy(xenfb->ds->data + (line * xenfb->ds->linesize) + (x * xenfb->ds->depth / 8),
- xenfb->pixels + (line * xenfb->row_stride) + (x * xenfb->depth / 8),
+ xenfb->pixels + xenfb->offset + (line * xenfb->row_stride) + (x * xenfb->depth / 8),
w * xenfb->depth / 8);
}
} else { /* Mismatch requires slow pixel munging */
dpy_update(xenfb->ds, x, y, w, h);
}
-/* Periodic update of display, no need for any in our case */
+/* Periodic update of display, transmit the refresh interval to the frontend */
static void xenfb_update(void *opaque)
{
struct xenfb *xenfb = opaque;
+ int period;
+
+ if (xenfb_queue_full(xenfb))
+ return;
+
+ if (xenfb->ds->idle)
+ period = XENFB_NO_REFRESH;
+ else {
+ period = xenfb->ds->gui_timer_interval;
+ if (!period)
+ period = GUI_REFRESH_INTERVAL;
+ }
+
+ /* Will have to be disabled for frontends without feature-update */
+ if (xenfb->refresh_period != period) {
+ xenfb_send_refresh_period(xenfb, period);
+ xenfb->refresh_period = period;
+ }
}
/* QEMU display state changed, so refresh the framebuffer copy */
}
#ifdef CONFIG_STUBDOM
-static struct semaphore kbd_sem = __SEMAPHORE_INITIALIZER(kbd_sem, 0);
-static struct kbdfront_dev *kbd_dev;
+typedef struct XenFBState {
+ struct semaphore kbd_sem;
+ struct kbdfront_dev *kbd_dev;
+ struct fbfront_dev *fb_dev;
+ void *vga_vram, *nonshared_vram;
+ DisplayState *ds;
+} XenFBState;
+
+XenFBState *xs;
+
static char *kbd_path, *fb_path;
static unsigned char linux2scancode[KEY_MAX + 1];
-#define WIDTH 1024
-#define HEIGHT 768
-#define DEPTH 32
-#define LINESIZE (1280 * (DEPTH / 8))
-#define MEMSIZE (LINESIZE * HEIGHT)
-
int xenfb_connect_vkbd(const char *path)
{
kbd_path = strdup(path);
return 0;
}
-static void xenfb_pv_update(DisplayState *s, int x, int y, int w, int h)
+static void xenfb_pv_update(DisplayState *ds, int x, int y, int w, int h)
{
- struct fbfront_dev *fb_dev = s->opaque;
+ XenFBState *xs = ds->opaque;
+ struct fbfront_dev *fb_dev = xs->fb_dev;
+ if (!fb_dev)
+ return;
fbfront_update(fb_dev, x, y, w, h);
}
-static void xenfb_pv_resize(DisplayState *s, int w, int h, int linesize)
+static void xenfb_pv_resize(DisplayState *ds, int w, int h, int linesize)
{
- struct fbfront_dev *fb_dev = s->opaque;
- fprintf(stderr,"resize to %dx%d required\n", w, h);
- s->width = w;
- s->height = h;
- /* TODO: send resize event if supported */
- memset(s->data, 0, MEMSIZE);
- fbfront_update(fb_dev, 0, 0, WIDTH, HEIGHT);
+ XenFBState *xs = ds->opaque;
+ struct fbfront_dev *fb_dev = xs->fb_dev;
+ fprintf(stderr,"resize to %dx%d, %d required\n", w, h, linesize);
+ ds->width = w;
+ ds->height = h;
+ if (!linesize)
+ ds->shared_buf = 0;
+ if (!ds->shared_buf)
+ linesize = w * 4;
+ ds->linesize = linesize;
+ if (!fb_dev)
+ return;
+ if (ds->shared_buf) {
+ ds->data = NULL;
+ } else {
+ ds->data = xs->nonshared_vram;
+ fbfront_resize(fb_dev, w, h, linesize, ds->depth, VGA_RAM_SIZE);
+ }
}
static void xenfb_pv_colourdepth(DisplayState *ds, int depth)
{
- /* TODO: send redepth event if supported */
+ XenFBState *xs = ds->opaque;
+ struct fbfront_dev *fb_dev = xs->fb_dev;
static int lastdepth = -1;
+ if (!depth) {
+ ds->shared_buf = 0;
+ ds->depth = 32;
+ } else {
+ ds->shared_buf = 1;
+ ds->depth = depth;
+ }
if (depth != lastdepth) {
fprintf(stderr,"redepth to %d required\n", depth);
lastdepth = depth;
+ } else return;
+ if (!fb_dev)
+ return;
+ if (ds->shared_buf) {
+ ds->data = NULL;
+ } else {
+ ds->data = xs->nonshared_vram;
+ fbfront_resize(fb_dev, ds->width, ds->height, ds->linesize, ds->depth, VGA_RAM_SIZE);
+ }
+}
+
+static void xenfb_pv_setdata(DisplayState *ds, void *pixels)
+{
+ XenFBState *xs = ds->opaque;
+ struct fbfront_dev *fb_dev = xs->fb_dev;
+ int offset = pixels - xs->vga_vram;
+ ds->data = pixels;
+ if (!fb_dev)
+ return;
+ fbfront_resize(fb_dev, ds->width, ds->height, ds->linesize, ds->depth, offset);
+}
+
+static void xenfb_pv_refresh(DisplayState *ds)
+{
+ vga_hw_update();
+}
+
+static void xenfb_fb_handler(void *opaque)
+{
+#define FB_NUM_BATCH 4
+ union xenfb_in_event buf[FB_NUM_BATCH];
+ int n, i;
+ XenFBState *xs = opaque;
+ DisplayState *ds = xs->ds;
+
+ n = fbfront_receive(xs->fb_dev, buf, FB_NUM_BATCH);
+ for (i = 0; i < n; i++) {
+ switch (buf[i].type) {
+ case XENFB_TYPE_REFRESH_PERIOD:
+ if (buf[i].refresh_period.period == XENFB_NO_REFRESH) {
+ /* Sleeping interval */
+ ds->idle = 1;
+ ds->gui_timer_interval = 500;
+ } else {
+ /* Set interval */
+ ds->idle = 0;
+ ds->gui_timer_interval = buf[i].refresh_period.period;
+ }
+ default:
+ /* ignore unknown events */
+ break;
+ }
}
- /* We can't redepth for now */
- ds->depth = DEPTH;
}
static void xenfb_kbd_handler(void *opaque)
#define KBD_NUM_BATCH 64
union xenkbd_in_event buf[KBD_NUM_BATCH];
int n, i;
- DisplayState *s = opaque;
+ XenFBState *xs = opaque;
+ DisplayState *s = xs->ds;
static int buttons;
static int x, y;
- n = kbdfront_receive(kbd_dev, buf, KBD_NUM_BATCH);
+ n = kbdfront_receive(xs->kbd_dev, buf, KBD_NUM_BATCH);
for (i = 0; i < n; i++) {
switch (buf[i].type) {
}
}
-static void xenfb_pv_refresh(DisplayState *ds)
-{
- /* always request negociation */
- ds->depth = -1;
- vga_hw_update();
-}
-
static void kbdfront_thread(void *p)
{
int scancode, keycode;
- kbd_dev = init_kbdfront(p, 1);
- if (!kbd_dev) {
+ XenFBState *xs = p;
+ xs->kbd_dev = init_kbdfront(kbd_path, 1);
+ if (!xs->kbd_dev) {
fprintf(stderr,"can't open keyboard\n");
exit(1);
}
- up(&kbd_sem);
+ up(&xs->kbd_sem);
for (scancode = 0; scancode < 128; scancode++) {
keycode = atkbd_set2_keycode[atkbd_unxlate_table[scancode]];
linux2scancode[keycode] = scancode;
int xenfb_pv_display_init(DisplayState *ds)
{
- void *data;
+ if (!fb_path || !kbd_path)
+ return -1;
+
+ xs = qemu_mallocz(sizeof(XenFBState));
+ if (!xs)
+ return -1;
+
+ init_SEMAPHORE(&xs->kbd_sem, 0);
+ xs->ds = ds;
+
+ create_thread("kbdfront", kbdfront_thread, (void*) xs);
+
+ ds->data = xs->nonshared_vram = qemu_memalign(PAGE_SIZE, VGA_RAM_SIZE);
+ memset(ds->data, 0, VGA_RAM_SIZE);
+ ds->opaque = xs;
+ ds->depth = 32;
+ ds->bgr = 0;
+ ds->width = 640;
+ ds->height = 400;
+ ds->linesize = 640 * 4;
+ ds->dpy_update = xenfb_pv_update;
+ ds->dpy_resize = xenfb_pv_resize;
+ ds->dpy_colourdepth = xenfb_pv_colourdepth;
+ ds->dpy_setdata = xenfb_pv_setdata;
+ ds->dpy_refresh = xenfb_pv_refresh;
+ return 0;
+}
+
+int xenfb_pv_display_start(void *data)
+{
+ DisplayState *ds;
struct fbfront_dev *fb_dev;
- int kbd_fd;
+ int kbd_fd, fb_fd;
+ int offset = 0;
+ unsigned long *mfns;
+ int n = VGA_RAM_SIZE / PAGE_SIZE;
+ int i;
if (!fb_path || !kbd_path)
- return -1;
+ return 0;
- create_thread("kbdfront", kbdfront_thread, (void*) kbd_path);
+ ds = xs->ds;
+ xs->vga_vram = data;
+ mfns = malloc(2 * n * sizeof(*mfns));
+ for (i = 0; i < n; i++)
+ mfns[i] = virtual_to_mfn(xs->vga_vram + i * PAGE_SIZE);
+ for (i = 0; i < n; i++)
+ mfns[n + i] = virtual_to_mfn(xs->nonshared_vram + i * PAGE_SIZE);
- data = qemu_memalign(PAGE_SIZE, VGA_RAM_SIZE);
- fb_dev = init_fbfront(fb_path, data, WIDTH, HEIGHT, DEPTH, LINESIZE, MEMSIZE);
+ fb_dev = init_fbfront(fb_path, mfns, ds->width, ds->height, ds->depth, ds->linesize, 2 * n);
+ free(mfns);
if (!fb_dev) {
fprintf(stderr,"can't open frame buffer\n");
exit(1);
}
free(fb_path);
- down(&kbd_sem);
+ if (ds->shared_buf) {
+ offset = (void*) ds->data - xs->vga_vram;
+ } else {
+ offset = VGA_RAM_SIZE;
+ ds->data = xs->nonshared_vram;
+ }
+ if (offset)
+ fbfront_resize(fb_dev, ds->width, ds->height, ds->linesize, ds->depth, offset);
+
+ down(&xs->kbd_sem);
free(kbd_path);
- kbd_fd = kbdfront_open(kbd_dev);
- qemu_set_fd_handler(kbd_fd, xenfb_kbd_handler, NULL, ds);
+ kbd_fd = kbdfront_open(xs->kbd_dev);
+ qemu_set_fd_handler(kbd_fd, xenfb_kbd_handler, NULL, xs);
- ds->data = data;
- ds->linesize = LINESIZE;
- ds->depth = DEPTH;
- ds->bgr = 0;
- ds->width = WIDTH;
- ds->height = HEIGHT;
- ds->dpy_update = xenfb_pv_update;
- ds->dpy_resize = xenfb_pv_resize;
- ds->dpy_colourdepth = xenfb_pv_colourdepth;
- ds->dpy_refresh = xenfb_pv_refresh;
- ds->opaque = fb_dev;
+ fb_fd = fbfront_open(fb_dev);
+ qemu_set_fd_handler(fb_fd, xenfb_fb_handler, NULL, xs);
+
+ xs->fb_dev = fb_dev;
return 0;
}
#endif
else
time_offset = 0;
- xc_domain_set_time_offset(xc_handle, domid, time_offset);
-
free(p);
}
CPUState *env = opaque;
ioreq_t *req = cpu_get_ioreq();
- handle_buffered_io(env);
+ __handle_buffered_iopage(env);
if (req) {
__handle_ioreq(env, req);
struct key_range *keypad_range;
struct key_range *numlock_range;
struct key_range *shift_range;
+ struct key_range *localstate_range;
} kbd_layout_t;
static void add_to_key_range(struct key_range **krp, int code) {
add_to_key_range(&k->shift_range, keysym);
//fprintf(stderr, "shift keysym %04x keycode %d\n", keysym, keycode);
}
+ if (rest && strstr(rest, "localstate")) {
+ add_to_key_range(&k->localstate_range, keycode);
+ //fprintf(stderr, "localstate keysym %04x keycode %d\n", keysym, keycode);
+ }
/* if(keycode&0x80)
keycode=(keycode<<8)^0x80e0; */
return 1;
return 0;
}
+
+static int keycodeIsShiftable(void *kbd_layout, int keycode)
+{
+ kbd_layout_t *k = kbd_layout;
+ struct key_range *kr;
+
+ for (kr = k->localstate_range; kr; kr = kr->next)
+ if (keycode >= kr->start && keycode <= kr->end)
+ return 0;
+ return 1;
+}
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+
+#include "config-host.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
--- /dev/null
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <signal.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <assert.h>
+
+extern int init_blktap(void);
+extern void qemu_aio_init(void);
+extern void qemu_aio_poll(void);
+extern void bdrv_init(void);
+
+extern void *qemu_mallocz(size_t size);
+extern void qemu_free(void *ptr);
+
+extern void *fd_start;
+
+int domid = 0;
+FILE* logfile;
+
+void term_printf(const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+}
+
+void term_print_filename(const char *filename)
+{
+ term_printf("%s", filename);
+}
+
+
+typedef void IOReadHandler(void *opaque, const uint8_t *buf, int size);
+typedef int IOCanRWHandler(void *opaque);
+typedef void IOHandler(void *opaque);
+
+typedef struct IOHandlerRecord {
+ int fd;
+ IOCanRWHandler *fd_read_poll;
+ IOHandler *fd_read;
+ IOHandler *fd_write;
+ int deleted;
+ void *opaque;
+ /* temporary data */
+ struct pollfd *ufd;
+ struct IOHandlerRecord *next;
+} IOHandlerRecord;
+
+static IOHandlerRecord *first_io_handler;
+
+int qemu_set_fd_handler2(int fd,
+ IOCanRWHandler *fd_read_poll,
+ IOHandler *fd_read,
+ IOHandler *fd_write,
+ void *opaque)
+{
+ IOHandlerRecord *ioh;
+
+ /* This is a stripped down version of fd handling */
+ assert(fd_read_poll == NULL);
+ assert(fd_write == NULL);
+
+ for(ioh = first_io_handler; ioh != NULL; ioh = ioh->next)
+ if (ioh->fd == fd)
+ goto found;
+
+ if (!fd_read && !fd_write)
+ return 0;
+
+ ioh = qemu_mallocz(sizeof(IOHandlerRecord));
+ if (!ioh)
+ return -1;
+ ioh->next = first_io_handler;
+ first_io_handler = ioh;
+
+found:
+ if (!fd_read && !fd_write) {
+ ioh->deleted = 1;
+ } else {
+ ioh->fd = fd;
+ ioh->fd_read = fd_read;
+ ioh->opaque = opaque;
+ ioh->deleted = 0;
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ IOHandlerRecord *ioh, **pioh;
+ int max_fd;
+ fd_set rfds;
+ struct timeval tv;
+ void *old_fd_start = NULL;
+
+ logfile = stderr;
+
+ bdrv_init();
+ qemu_aio_init();
+ init_blktap();
+
+ /* Daemonize */
+ if (fork() != 0)
+ exit(0);
+
+ /*
+ * Main loop: Pass events to the corresponding handlers and check for
+ * completed aio operations.
+ */
+ while (1) {
+ qemu_aio_poll();
+
+ max_fd = -1;
+ FD_ZERO(&rfds);
+ for(ioh = first_io_handler; ioh != NULL; ioh = ioh->next)
+ if (!ioh->deleted) {
+ FD_SET(ioh->fd, &rfds);
+ max_fd = max_fd > ioh->fd ? max_fd : ioh->fd;
+ }
+
+ tv.tv_sec = 0;
+ tv.tv_usec = 10000;
+ if (select(max_fd + 1, &rfds, NULL, NULL, &tv) <= 0)
+ continue;
+
+ /* Call handlers */
+ for(ioh = first_io_handler; ioh != NULL; ioh = ioh->next)
+ if (FD_ISSET(ioh->fd, &rfds))
+ ioh->fd_read(ioh->opaque);
+
+ /* Remove deleted IO handlers */
+ pioh = &first_io_handler;
+ while (*pioh) {
+ ioh = *pioh;
+ if (ioh->deleted) {
+ *pioh = ioh->next;
+ qemu_free(ioh);
+ } else
+ pioh = &ioh->next;
+ }
+
+ /* Exit when the last image has been closed */
+ if (old_fd_start != NULL && fd_start == NULL)
+ exit(0);
+
+ old_fd_start = fd_start;
+ }
+ return 0;
+}
#else
#define DEFAULT_RAM_SIZE 128
#endif
-/* in ms */
-#define GUI_REFRESH_INTERVAL 30
/* Max number of USB devices that can be specified on the commandline. */
#define MAX_USB_CMDLINE 8
int full_screen = 0;
int no_frame = 0;
int no_quit = 0;
+#ifdef CONFIG_OPENGL
+int opengl_enabled = 1;
+#else
+int opengl_enabled = 0;
+#endif
CharDriverState *serial_hds[MAX_SERIAL_PORTS];
CharDriverState *parallel_hds[MAX_PARALLEL_PORTS];
#ifdef TARGET_I386
ds->dpy_update = dumb_update;
ds->dpy_resize = dumb_resize;
ds->dpy_refresh = dumb_refresh;
+ ds->gui_timer_interval = 500;
+ ds->idle = 1;
}
/***********************************************************/
"-alt-grab use Ctrl-Alt-Shift to grab mouse (instead of Ctrl-Alt)\n"
"-no-quit disable SDL window close capability\n"
#endif
+#ifdef CONFIG_OPENGL
+ "-disable-opengl disable OpenGL rendering, using SDL\n"
+#endif
#ifdef TARGET_I386
"-no-fd-bootchk disable boot signature checking for floppy disks\n"
#endif
QEMU_OPTION_no_frame,
QEMU_OPTION_alt_grab,
QEMU_OPTION_no_quit,
+ QEMU_OPTION_disable_opengl,
QEMU_OPTION_pidfile,
QEMU_OPTION_no_kqemu,
QEMU_OPTION_kernel_kqemu,
{ "alt-grab", 0, QEMU_OPTION_alt_grab },
{ "no-quit", 0, QEMU_OPTION_no_quit },
#endif
+ { "disable-opengl", 0, QEMU_OPTION_disable_opengl },
{ "pidfile", HAS_ARG, QEMU_OPTION_pidfile },
{ "win2k-hack", 0, QEMU_OPTION_win2k_hack },
{ "usbdevice", HAS_ARG, QEMU_OPTION_usbdevice },
no_quit = 1;
break;
#endif
+ case QEMU_OPTION_disable_opengl:
+ opengl_enabled = 0;
+ break;
case QEMU_OPTION_pidfile:
pid_file = optarg;
break;
#endif
{
#if defined(CONFIG_SDL)
- sdl_display_init(ds, full_screen, no_frame);
+ sdl_display_init(ds, full_screen, no_frame, opengl_enabled);
#elif defined(CONFIG_COCOA)
cocoa_display_init(ds, full_screen);
#else
char *x509key;
#endif
char challenge[VNC_AUTH_CHALLENGE_SIZE];
+ int switchbpp;
#if CONFIG_VNC_TLS
int wiremode;
vs->has_update = 0;
vnc_flush(vs);
vs->last_update_time = now;
+ vs->ds->idle = 0;
vs->timer_interval /= 2;
if (vs->timer_interval < VNC_REFRESH_INTERVAL_BASE)
vs->timer_interval += VNC_REFRESH_INTERVAL_INC;
if (vs->timer_interval > VNC_REFRESH_INTERVAL_MAX) {
vs->timer_interval = VNC_REFRESH_INTERVAL_MAX;
- if (now - vs->last_update_time >= VNC_MAX_UPDATE_INTERVAL &&
- vs->update_requested) {
- /* Send a null update. If the client is no longer
- interested (e.g. minimised) it'll ignore this, and we
- can stop scanning the buffer until it sends another
- update request. */
- /* It turns out that there's a bug in realvncviewer 4.1.2
- which means that if you send a proper null update (with
- no update rectangles), it gets a bit out of sync and
- never sends any further requests, regardless of whether
- it needs one or not. Fix this by sending a single 1x1
- update rectangle instead. */
- vnc_write_u8(vs, 0);
- vnc_write_u8(vs, 0);
- vnc_write_u16(vs, 1);
- send_framebuffer_update(vs, 0, 0, 1, 1);
- vnc_flush(vs);
- vs->last_update_time = now;
- vs->update_requested--;
- return;
+ if (now - vs->last_update_time >= VNC_MAX_UPDATE_INTERVAL) {
+ if (!vs->update_requested) {
+ vs->ds->idle = 1;
+ } else {
+ /* Send a null update. If the client is no longer
+ interested (e.g. minimised) it'll ignore this, and we
+ can stop scanning the buffer until it sends another
+ update request. */
+ /* It turns out that there's a bug in realvncviewer 4.1.2
+ which means that if you send a proper null update (with
+ no update rectangles), it gets a bit out of sync and
+ never sends any further requests, regardless of whether
+ it needs one or not. Fix this by sending a single 1x1
+ update rectangle instead. */
+ vnc_write_u8(vs, 0);
+ vnc_write_u8(vs, 0);
+ vnc_write_u16(vs, 1);
+ send_framebuffer_update(vs, 0, 0, 1, 1);
+ vnc_flush(vs);
+ vs->last_update_time = now;
+ vs->update_requested--;
+ return;
+ }
}
}
qemu_mod_timer(vs->timer, now + vs->timer_interval);
qemu_set_fd_handler2(vs->csock, NULL, NULL, NULL, NULL);
closesocket(vs->csock);
vs->csock = -1;
+ vs->ds->idle = 1;
buffer_reset(&vs->input);
buffer_reset(&vs->output);
free_queue(vs);
int keycode;
int shift_keys = 0;
int shift = 0;
+ int keypad = 0;
if (is_graphic_console()) {
if (sym >= 'A' && sym <= 'Z') {
case 0x9d: /* Right CTRL */
case 0x38: /* Left ALT */
case 0xb8: /* Right ALT */
+ if (keycode & 0x80)
+ kbd_put_keycode(0xe0);
if (down) {
vs->modifiers_state[keycode] = 1;
kbd_put_keycode(keycode & 0x7f);
return;
}
- if (keycodeIsKeypad(vs->kbd_layout, keycode)) {
+ keypad = keycodeIsKeypad(vs->kbd_layout, keycode);
+ if (keypad) {
/* If the numlock state needs to change then simulate an additional
keypress before sending this one. This will happen if the user
toggles numlock away from the VNC window.
if (is_graphic_console()) {
/* If the shift state needs to change then simulate an additional
- keypress before sending this one.
+ keypress before sending this one. Ignore for non shiftable keys.
*/
if (shift && !shift_keys) {
press_key_shift_down(vs, down, keycode);
return;
}
- else if (!shift && shift_keys) {
+ else if (!shift && shift_keys && !keypad &&
+ keycodeIsShiftable(vs->kbd_layout, keycode)) {
press_key_shift_up(vs, down, keycode);
return;
}
if (ds->depth == 32) return;
depth = 32;
break;
+ case 8:
case 0:
ds->shared_buf = 0;
return;
default:
return;
}
- if (ds->switchbpp) {
+ if (vs->switchbpp) {
vnc_client_error(vs);
} else if (vs->csock != -1 && vs->has_WMVi) {
/* Sending a WMVi message to notify the client*/
vs->csock = accept(vs->lsock, (struct sockaddr *)&addr, &addrlen);
if (vs->csock != -1) {
VNC_DEBUG("New client on socket %d\n", vs->csock);
+ vs->ds->idle = 0;
socket_set_nonblock(vs->csock);
qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, NULL, opaque);
vnc_write(vs, "RFB 003.008\n", 12);
exit(1);
ds->opaque = vs;
+ ds->idle = 1;
vnc_state = vs;
vs->display = NULL;
vs->password = NULL;
if (strncmp(options, "password", 8) == 0) {
password = 1; /* Require password auth */
} else if (strncmp(options, "switchbpp", 9) == 0) {
- ds->switchbpp = 1;
+ vs->switchbpp = 1;
#if CONFIG_VNC_TLS
} else if (strncmp(options, "tls", 3) == 0) {
tls = 1; /* Require TLS */
{"Num_Lock", 0xff7f}, /* XK_Num_Lock */
{"Pause", 0xff13}, /* XK_Pause */
{"Escape", 0xff1b}, /* XK_Escape */
+{"ISO_Left_Tab", 0xfe20},/* XK_ISO_Left_Tab */
/* localized keys */
{"BackApostrophe", 0xff21},