direct-io.hg

changeset 11257:86d26e6ec89b

Replace the dom0_ops hypercall with three new hypercalls:
1. platform_op -- used by the dom0 kernel to perform actions on the
   hardware platform (e.g., MTRR access, microcode update, platform
   quirks, ...)
2. domctl -- used by management tools to control a specified domain
3. sysctl -- used by management tools for system-wide actions

Benefits include a more sensible factoring of actions into
hypercalls. It also allows tool compatibility to be tracked separately
from the dom0 kernel. The assumption is that it will be easier to
replace libxenctrl, libxenguest and Xen as a matched set if the dom0
kernel does not also need to be replaced (e.g., because replacing it
would require vendor revalidation).

From here on we hope to maintain dom0 kernel compatibility. This
promise does not extend to tool compatibility, beyond the existing
guarantee that compatibility will not be broken within a three-level
stable release series [3.0.2, 3.0.3, etc.].
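
As a minimal sketch of the new tool-side calling convention (taken
from the xc_domain_pause() change to tools/libxc/xc_domain.c in this
changeset; do_domctl() is the libxc wrapper that issues the actual
hypercall and is expected to fill in the interface version):

    /* Pause a domain via the new domctl hypercall. */
    int xc_domain_pause(int xc_handle, uint32_t domid)
    {
        DECLARE_DOMCTL;                       /* declares struct xen_domctl domctl */
        domctl.cmd = XEN_DOMCTL_pausedomain;  /* was DOM0_PAUSEDOMAIN              */
        domctl.domain = (domid_t)domid;
        return do_domctl(xc_handle, &domctl); /* was do_dom0_op()                  */
    }

The same DECLARE_DOMCTL / do_domctl pattern recurs throughout the
libxc changes below.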

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Aug 25 18:39:10 2006 +0100 (2006-08-25)
parents 9091331dfb35
children 1a3b883614b7
files tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c tools/debugger/libxendebug/Makefile tools/debugger/libxendebug/list.h tools/debugger/libxendebug/xendebug.c tools/debugger/libxendebug/xendebug.h tools/libxc/ia64/xc_ia64_hvm_build.c tools/libxc/ia64/xc_ia64_linux_restore.c tools/libxc/ia64/xc_ia64_linux_save.c tools/libxc/ia64/xc_ia64_stubs.c tools/libxc/powerpc64/xc_linux_build.c tools/libxc/xc_csched.c tools/libxc/xc_domain.c tools/libxc/xc_hvm_build.c tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_linux_save.c tools/libxc/xc_misc.c tools/libxc/xc_private.c tools/libxc/xc_private.h tools/libxc/xc_ptrace.c tools/libxc/xc_sedf.c tools/libxc/xc_tbuf.c tools/libxc/xenctrl.h tools/libxc/xg_private.h tools/misc/xenperf.c tools/python/xen/lowlevel/xc/xc.c tools/xenmon/setmask.c tools/xenstat/libxenstat/src/xenstat.c xen/arch/ia64/xen/dom0_ops.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/hypercall.c xen/arch/powerpc/dom0_ops.c xen/arch/powerpc/powerpc64/hypercall_table.S xen/arch/x86/Makefile xen/arch/x86/dom0_ops.c xen/arch/x86/domctl.c xen/arch/x86/platform_hypercall.c xen/arch/x86/setup.c xen/arch/x86/shadow2-common.c xen/arch/x86/sysctl.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_64/entry.S xen/common/Makefile xen/common/acm_ops.c xen/common/dom0_ops.c xen/common/domain.c xen/common/domctl.c xen/common/perfc.c xen/common/sched_credit.c xen/common/sched_sedf.c xen/common/schedule.c xen/common/sysctl.c xen/common/trace.c xen/include/acm/acm_hooks.h xen/include/asm-ia64/domain.h xen/include/asm-x86/shadow2.h xen/include/public/acm.h xen/include/public/acm_ops.h xen/include/public/arch-ia64.h xen/include/public/arch-powerpc.h xen/include/public/arch-x86_32.h xen/include/public/arch-x86_64.h xen/include/public/dom0_ops.h xen/include/public/domctl.h xen/include/public/platform.h xen/include/public/sched_ctl.h xen/include/public/sysctl.h xen/include/public/xen-compat.h xen/include/public/xen.h xen/include/xen/cpumask.h xen/include/xen/hypercall.h xen/include/xen/sched-if.h xen/include/xen/sched.h xen/include/xen/trace.h
line diff
     1.1 --- a/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c	Fri Aug 25 10:39:24 2006 +0100
     1.2 +++ b/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c	Fri Aug 25 18:39:10 2006 +0100
     1.3 @@ -54,15 +54,6 @@ curvcpuid()
     1.4  
     1.5  }
     1.6  
     1.7 -
     1.8 -#define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
     1.9 -#define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.             */
    1.10 -#define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
    1.11 -#define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
    1.12 -#define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
    1.13 -
    1.14 -
    1.15 -
    1.16  struct inferior_list all_processes;
    1.17  static int current_domid;
    1.18  static int expect_signal = 0;
     2.1 --- a/tools/debugger/libxendebug/Makefile	Fri Aug 25 10:39:24 2006 +0100
     2.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.3 @@ -1,75 +0,0 @@
     2.4 -
     2.5 -INSTALL		= install
     2.6 -INSTALL_PROG	= $(INSTALL) -m0755
     2.7 -INSTALL_DATA	= $(INSTALL) -m0644
     2.8 -INSTALL_DIR	= $(INSTALL) -d -m0755
     2.9 -
    2.10 -MAJOR    = 3.0
    2.11 -MINOR    = 0
    2.12 -
    2.13 -XEN_ROOT = ../../..
    2.14 -include $(XEN_ROOT)/tools/Rules.mk
    2.15 -
    2.16 -SRCS     := xendebug.c
    2.17 -
    2.18 -CFLAGS   += -Werror -fno-strict-aliasing
    2.19 -CFLAGS   += $(INCLUDES) -I. -I$(XEN_ROOT)/tools/libxc
    2.20 -# Get gcc to generate the dependencies for us.
    2.21 -CFLAGS   += -Wp,-MD,.$(@F).d
    2.22 -DEPS     = .*.d
    2.23 -
    2.24 -LDFLAGS  += -L$(XEN_ROOT)/tools/libxc -lxenctrl
    2.25 -
    2.26 -LIB_OBJS := $(patsubst %.c,%.o,$(SRCS))
    2.27 -PIC_OBJS := $(patsubst %.c,%.opic,$(SRCS))
    2.28 -
    2.29 -LIB      := libxendebug.a libxendebug.so
    2.30 -LIB      += libxendebug.so.$(MAJOR) libxendebug.so.$(MAJOR).$(MINOR)
    2.31 -
    2.32 -.PHONY: all
    2.33 -all: build
    2.34 -
    2.35 -.PHONY: build
    2.36 -build:
    2.37 -	$(MAKE) $(LIB)
    2.38 -
    2.39 -.PHONY: install
    2.40 -install: build
    2.41 -	[ -d $(DESTDIR)/usr/$(LIBDIR) ] || $(INSTALL_DIR) $(DESTDIR)/usr/$(LIBDIR)
    2.42 -	[ -d $(DESTDIR)/usr/include ] || $(INSTALL_DIR) $(DESTDIR)/usr/include
    2.43 -	$(INSTALL_PROG) libxendebug.so.$(MAJOR).$(MINOR) $(DESTDIR)/usr/$(LIBDIR)
    2.44 -	$(INSTALL_DATA) libxendebug.a $(DESTDIR)/usr/$(LIBDIR)
    2.45 -	ln -sf libxendebug.so.$(MAJOR).$(MINOR) $(DESTDIR)/usr/$(LIBDIR)/libxendebug.so.$(MAJOR)
    2.46 -	ln -sf libxendebug.so.$(MAJOR) $(DESTDIR)/usr/$(LIBDIR)/libxendebug.so
    2.47 -	$(INSTALL_DATA) xendebug.h $(DESTDIR)/usr/include
    2.48 -
    2.49 -.PHONY: TAGS
    2.50 -TAGS:
    2.51 -	etags -t $(SRCS) *.h
    2.52 -
    2.53 -.PHONY: clean
    2.54 -clean:
    2.55 -	rm -rf *.a *.so* *.o *.opic *.rpm $(LIB) *~ $(DEPS) xen
    2.56 -
    2.57 -.PHONY: rpm
    2.58 -rpm: build
    2.59 -	rm -rf staging
    2.60 -	mkdir staging
    2.61 -	mkdir staging/i386
    2.62 -	rpmbuild --define "staging$$PWD/staging" --define '_builddir.' \
    2.63 -		--define "_rpmdir$$PWD/staging" -bb rpm.spec
    2.64 -	mv staging/i386/*.rpm .
    2.65 -	rm -rf staging
    2.66 -
    2.67 -libxendebug.a: $(LIB_OBJS)
    2.68 -	$(AR) rc $@ $^
    2.69 -
    2.70 -libxendebug.so: libxendebug.so.$(MAJOR)
    2.71 -	ln -sf $< $@
    2.72 -libxendebug.so.$(MAJOR): libxendebug.so.$(MAJOR).$(MINOR)
    2.73 -	ln -sf $< $@
    2.74 -
    2.75 -libxendebug.so.$(MAJOR).$(MINOR): $(PIC_OBJS)
    2.76 -	$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-soname -Wl,libxendebug.so.$(MAJOR) -shared -o $@ $^
    2.77 -
    2.78 --include $(DEPS)
     3.1 --- a/tools/debugger/libxendebug/list.h	Fri Aug 25 10:39:24 2006 +0100
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,186 +0,0 @@
     3.4 -#ifndef _LINUX_LIST_H
     3.5 -#define _LINUX_LIST_H
     3.6 -
     3.7 -/*
     3.8 - * Simple doubly linked list implementation.
     3.9 - *
    3.10 - * Some of the internal functions ("__xxx") are useful when
    3.11 - * manipulating whole lists rather than single entries, as
    3.12 - * sometimes we already know the next/prev entries and we can
    3.13 - * generate better code by using them directly rather than
    3.14 - * using the generic single-entry routines.
    3.15 - */
    3.16 -
    3.17 -struct list_head {
    3.18 -	struct list_head *next, *prev;
    3.19 -};
    3.20 -
    3.21 -#define LIST_HEAD_INIT(name) { &(name), &(name) }
    3.22 -
    3.23 -#define LIST_HEAD(name) \
    3.24 -	struct list_head name = LIST_HEAD_INIT(name)
    3.25 -
    3.26 -#define INIT_LIST_HEAD(ptr) do { \
    3.27 -	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
    3.28 -} while (0)
    3.29 -
    3.30 -/*
    3.31 - * Insert a new entry between two known consecutive entries. 
    3.32 - *
    3.33 - * This is only for internal list manipulation where we know
    3.34 - * the prev/next entries already!
    3.35 - */
    3.36 -static __inline__ void __list_add(struct list_head * new,
    3.37 -	struct list_head * prev,
    3.38 -	struct list_head * next)
    3.39 -{
    3.40 -	next->prev = new;
    3.41 -	new->next = next;
    3.42 -	new->prev = prev;
    3.43 -	prev->next = new;
    3.44 -}
    3.45 -
    3.46 -/**
    3.47 - * list_add - add a new entry
    3.48 - * @new: new entry to be added
    3.49 - * @head: list head to add it after
    3.50 - *
    3.51 - * Insert a new entry after the specified head.
    3.52 - * This is good for implementing stacks.
    3.53 - */
    3.54 -static __inline__ void list_add(struct list_head *new, struct list_head *head)
    3.55 -{
    3.56 -	__list_add(new, head, head->next);
    3.57 -}
    3.58 -
    3.59 -/**
    3.60 - * list_add_tail - add a new entry
    3.61 - * @new: new entry to be added
    3.62 - * @head: list head to add it before
    3.63 - *
    3.64 - * Insert a new entry before the specified head.
    3.65 - * This is useful for implementing queues.
    3.66 - */
    3.67 -static __inline__ void list_add_tail(struct list_head *new, struct list_head *head)
    3.68 -{
    3.69 -	__list_add(new, head->prev, head);
    3.70 -}
    3.71 -
    3.72 -/*
    3.73 - * Delete a list entry by making the prev/next entries
    3.74 - * point to each other.
    3.75 - *
    3.76 - * This is only for internal list manipulation where we know
    3.77 - * the prev/next entries already!
    3.78 - */
    3.79 -static __inline__ void __list_del(struct list_head * prev,
    3.80 -				  struct list_head * next)
    3.81 -{
    3.82 -	next->prev = prev;
    3.83 -	prev->next = next;
    3.84 -}
    3.85 -
    3.86 -/**
    3.87 - * list_del - deletes entry from list.
    3.88 - * @entry: the element to delete from the list.
    3.89 - * Note: list_empty on entry does not return true after this, the entry is in an undefined state.
    3.90 - */
    3.91 -static __inline__ void list_del(struct list_head *entry)
    3.92 -{
    3.93 -	__list_del(entry->prev, entry->next);
    3.94 -}
    3.95 -
    3.96 -/**
    3.97 - * list_del_init - deletes entry from list and reinitialize it.
    3.98 - * @entry: the element to delete from the list.
    3.99 - */
   3.100 -static __inline__ void list_del_init(struct list_head *entry)
   3.101 -{
   3.102 -	__list_del(entry->prev, entry->next);
   3.103 -	INIT_LIST_HEAD(entry); 
   3.104 -}
   3.105 -
   3.106 -/**
   3.107 - * list_empty - tests whether a list is empty
   3.108 - * @head: the list to test.
   3.109 - */
   3.110 -static __inline__ int list_empty(struct list_head *head)
   3.111 -{
   3.112 -	return head->next == head;
   3.113 -}
   3.114 -
   3.115 -/**
   3.116 - * list_splice - join two lists
   3.117 - * @list: the new list to add.
   3.118 - * @head: the place to add it in the first list.
   3.119 - */
   3.120 -static __inline__ void list_splice(struct list_head *list, struct list_head *head)
   3.121 -{
   3.122 -	struct list_head *first = list->next;
   3.123 -
   3.124 -	if (first != list) {
   3.125 -		struct list_head *last = list->prev;
   3.126 -		struct list_head *at = head->next;
   3.127 -
   3.128 -		first->prev = head;
   3.129 -		head->next = first;
   3.130 -
   3.131 -		last->next = at;
   3.132 -		at->prev = last;
   3.133 -	}
   3.134 -}
   3.135 -
   3.136 -/**
   3.137 - * list_entry - get the struct for this entry
   3.138 - * @ptr:	the &struct list_head pointer.
   3.139 - * @type:	the type of the struct this is embedded in.
   3.140 - * @member:	the name of the list_struct within the struct.
   3.141 - */
   3.142 -#define list_entry(ptr, type, member) \
   3.143 -	((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
   3.144 -
   3.145 -/**
   3.146 - * list_for_each	-	iterate over a list
   3.147 - * @pos:	the &struct list_head to use as a loop counter.
   3.148 - * @head:	the head for your list.
   3.149 - */
   3.150 -#define list_for_each(pos, head) \
   3.151 -	for (pos = (head)->next; pos != (head); pos = pos->next)
   3.152 -        	
   3.153 -/**
   3.154 - * list_for_each_safe	-	iterate over a list safe against removal of list entry
   3.155 - * @pos:	the &struct list_head to use as a loop counter.
   3.156 - * @n:		another &struct list_head to use as temporary storage
   3.157 - * @head:	the head for your list.
   3.158 - */
   3.159 -#define list_for_each_safe(pos, n, head) \
   3.160 -	for (pos = (head)->next, n = pos->next; pos != (head); \
   3.161 -		pos = n, n = pos->next)
   3.162 -
   3.163 -/**
   3.164 - * list_for_each_entry	-	iterate over list of given type
   3.165 - * @pos:	the type * to use as a loop counter.
   3.166 - * @head:	the head for your list.
   3.167 - * @member:	the name of the list_struct within the struct.
   3.168 - */
   3.169 -#define list_for_each_entry(pos, head, member)				\
   3.170 -	for (pos = list_entry((head)->next, typeof(*pos), member),	\
   3.171 -		     prefetch(pos->member.next);			\
   3.172 -	     &pos->member != (head); 					\
   3.173 -	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
   3.174 -		     prefetch(pos->member.next))
   3.175 -
   3.176 -/**
   3.177 - * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
   3.178 - * @pos:	the type * to use as a loop counter.
   3.179 - * @n:		another type * to use as temporary storage
   3.180 - * @head:	the head for your list.
   3.181 - * @member:	the name of the list_struct within the struct.
   3.182 - */
   3.183 -#define list_for_each_entry_safe(pos, n, head, member)			\
   3.184 -	for (pos = list_entry((head)->next, typeof(*pos), member),	\
   3.185 -		n = list_entry(pos->member.next, typeof(*pos), member);	\
   3.186 -	     &pos->member != (head); 					\
   3.187 -	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
   3.188 -#endif /* _LINUX_LIST_H */
   3.189 -
     4.1 --- a/tools/debugger/libxendebug/xendebug.c	Fri Aug 25 10:39:24 2006 +0100
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,599 +0,0 @@
     4.4 -/*
     4.5 - * xendebug.c
     4.6 - *
     4.7 - * alex ho
     4.8 - * http://www.cl.cam.ac.uk/netos/pdb
     4.9 - *
    4.10 - * xendebug_memory_page adapted from xc_ptrace.c
    4.11 - */
    4.12 -
    4.13 -#include <stdio.h>
    4.14 -#include <stdlib.h>
    4.15 -#include <string.h>
    4.16 -#include <errno.h>
    4.17 -#include <sys/mman.h>
    4.18 -#include <xenctrl.h>
    4.19 -#include "list.h"
    4.20 -
    4.21 -#if defined(__i386__)
    4.22 -#define L1_PAGETABLE_SHIFT       12
    4.23 -#define L2_PAGETABLE_SHIFT       22
    4.24 -#elif defined(__x86_64__)
    4.25 -#define L1_PAGETABLE_SHIFT      12
    4.26 -#define L2_PAGETABLE_SHIFT      21
    4.27 -#define L3_PAGETABLE_SHIFT      30
    4.28 -#define L4_PAGETABLE_SHIFT      39
    4.29 -#endif
    4.30 -
    4.31 -#define PAGE_SHIFT L1_PAGETABLE_SHIFT
    4.32 -#define PAGE_SIZE  (1UL<<PAGE_SHIFT)
    4.33 -#define PAGE_MASK  (~(PAGE_SIZE - 1))
    4.34 -
    4.35 -/* from xen/include/asm-x86/processor.h */
    4.36 -#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
    4.37 -
    4.38 -typedef int boolean;
    4.39 -#define true 1
    4.40 -#define false 0
    4.41 -
    4.42 -
    4.43 -typedef struct bwcpoint                           /* break/watch/catch point */
    4.44 -{
    4.45 -    struct list_head list;
    4.46 -    unsigned long address;
    4.47 -    uint32_t domain;
    4.48 -    uint8_t old_value;                             /* old value for software bkpt */
    4.49 -} bwcpoint_t, *bwcpoint_p;
    4.50 -
    4.51 -static bwcpoint_t bwcpoint_list;
    4.52 -
    4.53 -
    4.54 -
    4.55 -typedef struct domain_context                 /* local cache of domain state */
    4.56 -{
    4.57 -    struct list_head     list;
    4.58 -    uint32_t                  domid;
    4.59 -    boolean              valid[MAX_VIRT_CPUS];
    4.60 -    vcpu_guest_context_t context[MAX_VIRT_CPUS];
    4.61 -
    4.62 -    long            total_pages;
    4.63 -    xen_pfn_t      *page_array;
    4.64 -
    4.65 -    unsigned long   cr3_phys[MAX_VIRT_CPUS];
    4.66 -    unsigned long  *cr3_virt[MAX_VIRT_CPUS];
    4.67 -    unsigned long   pde_phys[MAX_VIRT_CPUS];     
    4.68 -    unsigned long  *pde_virt[MAX_VIRT_CPUS];
    4.69 -    unsigned long   page_phys[MAX_VIRT_CPUS];     
    4.70 -    unsigned long  *page_virt[MAX_VIRT_CPUS];
    4.71 -    int             page_perm[MAX_VIRT_CPUS];
    4.72 -} domain_context_t, *domain_context_p;
    4.73 -
    4.74 -static domain_context_t domain_context_list;
    4.75 -
    4.76 -/* initialization */
    4.77 -
    4.78 -static boolean xendebug_initialized = false;
    4.79 -
    4.80 -static __inline__ void
    4.81 -xendebug_initialize()
    4.82 -{
    4.83 -    if ( !xendebug_initialized )
    4.84 -    {
    4.85 -        memset((void *) &domain_context_list, 0, sizeof(domain_context_t));
    4.86 -        INIT_LIST_HEAD(&domain_context_list.list);
    4.87 -
    4.88 -        memset((void *) &bwcpoint_list, 0, sizeof(bwcpoint_t));
    4.89 -        INIT_LIST_HEAD(&bwcpoint_list.list);
    4.90 -
    4.91 -        xendebug_initialized = true;
    4.92 -    }
    4.93 -}
    4.94 -
    4.95 -/**************/
    4.96 -
    4.97 -static domain_context_p
    4.98 -xendebug_domain_context_search (uint32_t domid)
    4.99 -{
   4.100 -    struct list_head *entry;
   4.101 -    domain_context_p  ctxt;
   4.102 -
   4.103 -    list_for_each(entry, &domain_context_list.list)
   4.104 -    {
   4.105 -        ctxt = list_entry(entry, domain_context_t, list);
   4.106 -        if ( domid == ctxt->domid )
   4.107 -            return ctxt;
   4.108 -    }
   4.109 -    return (domain_context_p)NULL;
   4.110 -}
   4.111 -
   4.112 -static __inline__ domain_context_p
   4.113 -xendebug_get_context (int xc_handle, uint32_t domid, uint32_t vcpu)
   4.114 -{
   4.115 -    int rc;
   4.116 -    domain_context_p ctxt;
   4.117 -
   4.118 -    xendebug_initialize();
   4.119 -
   4.120 -    if ( (ctxt = xendebug_domain_context_search(domid)) == NULL)
   4.121 -        return NULL;
   4.122 -
   4.123 -    if ( !ctxt->valid[vcpu] )
   4.124 -    {
   4.125 -        if ( (rc = xc_vcpu_getcontext(xc_handle, domid, vcpu, 
   4.126 -                                      &ctxt->context[vcpu])) )
   4.127 -            return NULL;
   4.128 -
   4.129 -        ctxt->valid[vcpu] = true;
   4.130 -    }
   4.131 -
   4.132 -    return ctxt;
   4.133 -}
   4.134 -
   4.135 -static __inline__ int
   4.136 -xendebug_set_context (int xc_handle, domain_context_p ctxt, uint32_t vcpu)
   4.137 -{
   4.138 -    dom0_op_t op;
   4.139 -    int rc;
   4.140 -
   4.141 -    if ( !ctxt->valid[vcpu] )
   4.142 -        return -EINVAL;
   4.143 -
   4.144 -    op.interface_version = DOM0_INTERFACE_VERSION;
   4.145 -    op.cmd = DOM0_SETVCPUCONTEXT;
   4.146 -    op.u.setvcpucontext.domain = ctxt->domid;
   4.147 -    op.u.setvcpucontext.vcpu = vcpu;
   4.148 -    op.u.setvcpucontext.ctxt = &ctxt->context[vcpu];
   4.149 -
   4.150 -    if ( (rc = mlock(&ctxt->context[vcpu], sizeof(vcpu_guest_context_t))) )
   4.151 -        return rc;
   4.152 -
   4.153 -    rc = xc_dom0_op(xc_handle, &op);
   4.154 -    (void) munlock(&ctxt->context[vcpu], sizeof(vcpu_guest_context_t));
   4.155 -
   4.156 -    return rc;
   4.157 -}
   4.158 -
   4.159 -/**************/
   4.160 -
   4.161 -int
   4.162 -xendebug_attach(int xc_handle,
   4.163 -                uint32_t domid,
   4.164 -                uint32_t vcpu)
   4.165 -{
   4.166 -    domain_context_p ctxt;
   4.167 -
   4.168 -    xendebug_initialize();
   4.169 -
   4.170 -    if ( (ctxt = malloc(sizeof(domain_context_t))) == NULL )
   4.171 -        return -1;
   4.172 -    memset(ctxt, 0, sizeof(domain_context_t));
   4.173 -    
   4.174 -    ctxt->domid = domid;
   4.175 -    list_add(&ctxt->list, &domain_context_list.list);
   4.176 -
   4.177 -    return xc_domain_pause(xc_handle, domid);
   4.178 -}
   4.179 -
   4.180 -int
   4.181 -xendebug_detach(int xc_handle,
   4.182 -                uint32_t domid,
   4.183 -                uint32_t vcpu)
   4.184 -{
   4.185 -    domain_context_p ctxt;
   4.186 -    
   4.187 -    xendebug_initialize();
   4.188 -
   4.189 -    if ( (ctxt = xendebug_domain_context_search (domid)) == NULL)
   4.190 -        return -EINVAL;
   4.191 -
   4.192 -    list_del(&ctxt->list);
   4.193 -
   4.194 -    if ( ctxt->page_array ) free(ctxt->page_array);
   4.195 -
   4.196 -    free(ctxt);
   4.197 -
   4.198 -    return xc_domain_unpause(xc_handle, domid);
   4.199 -}
   4.200 -
   4.201 -int
   4.202 -xendebug_read_registers(int xc_handle,
   4.203 -                        uint32_t domid,
   4.204 -                        uint32_t vcpu,
   4.205 -                        cpu_user_regs_t **regs)
   4.206 -{
   4.207 -    domain_context_p ctxt;
   4.208 -    int rc = -1;
   4.209 -
   4.210 -    xendebug_initialize();
   4.211 -
   4.212 -    ctxt = xendebug_get_context(xc_handle, domid, vcpu);
   4.213 -    if (ctxt)
   4.214 -    {
   4.215 -        *regs = &ctxt->context[vcpu].user_regs;
   4.216 -        rc = 0;
   4.217 -    }
   4.218 -
   4.219 -    return rc;
   4.220 -}
   4.221 -
   4.222 -int
   4.223 -xendebug_read_fpregisters (int xc_handle,
   4.224 -                           uint32_t domid,
   4.225 -                           uint32_t vcpu,
   4.226 -                           char **regs)
   4.227 -{
   4.228 -    domain_context_p ctxt;
   4.229 -    int rc = -1;
   4.230 -
   4.231 -    xendebug_initialize();
   4.232 -
   4.233 -    ctxt = xendebug_get_context(xc_handle, domid, vcpu);
   4.234 -    if (ctxt)
   4.235 -    {
   4.236 -        *regs = ctxt->context[vcpu].fpu_ctxt.x;
   4.237 -        rc = 0;
   4.238 -    }
   4.239 -
   4.240 -    return rc;
   4.241 -}
   4.242 -
   4.243 -int
   4.244 -xendebug_write_registers(int xc_handle,
   4.245 -                         uint32_t domid,
   4.246 -                         uint32_t vcpu,
   4.247 -                         cpu_user_regs_t *regs)
   4.248 -{
   4.249 -    domain_context_p ctxt;
   4.250 -    int rc = -1;
   4.251 -
   4.252 -    xendebug_initialize();
   4.253 -
   4.254 -    ctxt = xendebug_get_context(xc_handle, domid, vcpu);
   4.255 -    if (ctxt)
   4.256 -    {
   4.257 -        memcpy(&ctxt->context[vcpu].user_regs, regs, sizeof(cpu_user_regs_t));
   4.258 -        rc = xendebug_set_context(xc_handle, ctxt, vcpu);
   4.259 -    }
   4.260 -    
   4.261 -    return rc;
   4.262 -}
   4.263 -
   4.264 -int
   4.265 -xendebug_step(int xc_handle,
   4.266 -              uint32_t domid,
   4.267 -              uint32_t vcpu)
   4.268 -{
   4.269 -    domain_context_p ctxt;
   4.270 -    int rc;
   4.271 -
   4.272 -    xendebug_initialize();
   4.273 -
   4.274 -    ctxt = xendebug_get_context(xc_handle, domid, vcpu);
   4.275 -    if (!ctxt) return -EINVAL;
   4.276 -
   4.277 -    ctxt->context[vcpu].user_regs.eflags |= X86_EFLAGS_TF;
   4.278 -
   4.279 -    if ( (rc = xendebug_set_context(xc_handle, ctxt, vcpu)) )
   4.280 -        return rc;
   4.281 -
   4.282 -    ctxt->valid[vcpu] = false;
   4.283 -    return xc_domain_unpause(xc_handle, domid);
   4.284 -}
   4.285 -
   4.286 -int
   4.287 -xendebug_continue(int xc_handle,
   4.288 -                  uint32_t domid,
   4.289 -                  uint32_t vcpu)
   4.290 -{
   4.291 -    domain_context_p ctxt;
   4.292 -    int rc;
   4.293 -
   4.294 -    xendebug_initialize();
   4.295 -
   4.296 -    ctxt = xendebug_get_context(xc_handle, domid, vcpu);
   4.297 -    if (!ctxt) return -EINVAL;
   4.298 -
   4.299 -    if ( ctxt->context[vcpu].user_regs.eflags & X86_EFLAGS_TF )
   4.300 -    {
   4.301 -        ctxt->context[vcpu].user_regs.eflags &= ~X86_EFLAGS_TF;
   4.302 -        if ( (rc = xendebug_set_context(xc_handle, ctxt, vcpu)) )
   4.303 -            return rc;
   4.304 -    }
   4.305 -    ctxt->valid[vcpu] = false;
   4.306 -    return xc_domain_unpause(xc_handle, domid);
   4.307 -}
   4.308 -
   4.309 -/*************************************************/
   4.310 -
   4.311 -#define vtopdi(va) ((va) >> L2_PAGETABLE_SHIFT)
   4.312 -#define vtopti(va) (((va) >> PAGE_SHIFT) & 0x3ff)
   4.313 -
   4.314 -/* access to one page */
   4.315 -static int
   4.316 -xendebug_memory_page (domain_context_p ctxt, int xc_handle, uint32_t vcpu,
   4.317 -                      int protection, unsigned long address, int length, uint8_t *buffer)
   4.318 -{
   4.319 -    vcpu_guest_context_t *vcpu_ctxt = &ctxt->context[vcpu];
   4.320 -    unsigned long pde, page;
   4.321 -    unsigned long va = (unsigned long)address;
   4.322 -    void *ptr;
   4.323 -    long pages;
   4.324 -
   4.325 -    pages = xc_get_tot_pages(xc_handle, ctxt->domid);
   4.326 -
   4.327 -    if ( ctxt->total_pages != pages )
   4.328 -    {
   4.329 -        if ( ctxt->total_pages > 0 ) free( ctxt->page_array );
   4.330 -        ctxt->total_pages = pages;
   4.331 -
   4.332 -        ctxt->page_array = malloc(pages * sizeof(unsigned long));
   4.333 -        if ( ctxt->page_array == NULL )
   4.334 -        {
   4.335 -            printf("Could not allocate memory\n");
   4.336 -            return 0;
   4.337 -        }
   4.338 -
   4.339 -        if ( xc_get_pfn_list(xc_handle, ctxt->domid, ctxt->page_array,pages) !=
   4.340 -                pages )
   4.341 -        {
   4.342 -            printf("Could not get the page frame list\n");
   4.343 -            return 0;
   4.344 -        }
   4.345 -    }
   4.346 -
   4.347 -    if ( vcpu_ctxt->ctrlreg[3] != ctxt->cr3_phys[vcpu]) 
   4.348 -    {
   4.349 -        ctxt->cr3_phys[vcpu] = vcpu_ctxt->ctrlreg[3];
   4.350 -        if ( ctxt->cr3_virt[vcpu] )
   4.351 -            munmap(ctxt->cr3_virt[vcpu], PAGE_SIZE);
   4.352 -        ctxt->cr3_virt[vcpu] = xc_map_foreign_range(
   4.353 -            xc_handle, ctxt->domid, PAGE_SIZE, PROT_READ,
   4.354 -            xen_cr3_to_pfn(ctxt->cr3_phys[vcpu]));
   4.355 -        if ( ctxt->cr3_virt[vcpu] == NULL )
   4.356 -            return 0;
   4.357 -    } 
   4.358 -
   4.359 -
   4.360 -    if ( (pde = ctxt->cr3_virt[vcpu][vtopdi(va)]) == 0) /* logical address */
   4.361 -        return 0;
   4.362 -    if (ctxt->context[vcpu].flags & VGCF_HVM_GUEST)
   4.363 -        pde = ctxt->page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
   4.364 -    if (pde != ctxt->pde_phys[vcpu]) 
   4.365 -    {
   4.366 -        ctxt->pde_phys[vcpu] = pde;
   4.367 -        if ( ctxt->pde_virt[vcpu])
   4.368 -            munmap(ctxt->pde_virt[vcpu], PAGE_SIZE);
   4.369 -        ctxt->pde_virt[vcpu] = xc_map_foreign_range(xc_handle, ctxt->domid,
   4.370 -                    PAGE_SIZE, PROT_READ, ctxt->pde_phys[vcpu] >> PAGE_SHIFT);
   4.371 -        if ( ctxt->pde_virt[vcpu] == NULL )
   4.372 -            return 0;
   4.373 -    }
   4.374 -
   4.375 -    if ((page = ctxt->pde_virt[vcpu][vtopti(va)]) == 0) /* logical address */
   4.376 -        return 0;
   4.377 -    if (ctxt->context[vcpu].flags & VGCF_HVM_GUEST)
   4.378 -        page = ctxt->page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
   4.379 -    if (page != ctxt->page_phys[vcpu] || protection != ctxt->page_perm[vcpu]) 
   4.380 -    {
   4.381 -        ctxt->page_phys[vcpu] = page;
   4.382 -        if (ctxt->page_virt[vcpu])
   4.383 -            munmap(ctxt->page_virt[vcpu], PAGE_SIZE);
   4.384 -        ctxt->page_virt[vcpu] = xc_map_foreign_range(xc_handle, ctxt->domid, 
   4.385 -                  PAGE_SIZE, protection, ctxt->page_phys[vcpu] >> PAGE_SHIFT);
   4.386 -        if ( ctxt->page_virt[vcpu] == NULL )
   4.387 -        {
   4.388 -            printf("cr3 %lx pde %lx page %lx pti %lx\n", 
   4.389 -                   vcpu_ctxt->ctrlreg[3], pde, page, vtopti(va));
   4.390 -            ctxt->page_phys[vcpu] = 0;
   4.391 -            return 0;
   4.392 -        }
   4.393 -        ctxt->page_perm[vcpu] = protection;
   4.394 -    }	
   4.395 -
   4.396 -    ptr = (void *)( (unsigned long)ctxt->page_virt[vcpu] |
   4.397 -                    (va & ~PAGE_MASK) );
   4.398 -
   4.399 -    if ( protection & PROT_WRITE )
   4.400 -    {
   4.401 -        memcpy(ptr, buffer, length);
   4.402 -    }
   4.403 -    else
   4.404 -    {
   4.405 -        memcpy(buffer, ptr, length);
   4.406 -    }
   4.407 -
   4.408 -    return length;
   4.409 -}
   4.410 -
   4.411 -/* divide a memory operation into accesses to individual pages */
   4.412 -static int
   4.413 -xendebug_memory_op (domain_context_p ctxt, int xc_handle, uint32_t vcpu,
   4.414 -                    int protection, unsigned long address, int length, uint8_t *buffer)
   4.415 -{
   4.416 -    int      remain;              /* number of bytes to touch past this page */
   4.417 -    int      bytes   = 0;
   4.418 -
   4.419 -    while ( (remain = (address + length - 1) - (address | (PAGE_SIZE-1))) > 0)
   4.420 -    {
   4.421 -        bytes += xendebug_memory_page(ctxt, xc_handle, vcpu, protection,
   4.422 -                                      address, length - remain, buffer);
   4.423 -        buffer += (length - remain);
   4.424 -        length = remain;
   4.425 -        address = (address | (PAGE_SIZE - 1)) + 1;
   4.426 -    }
   4.427 -
   4.428 -    bytes += xendebug_memory_page(ctxt, xc_handle, vcpu, protection,
   4.429 -                                  address, length, buffer);
   4.430 -
   4.431 -    return bytes;
   4.432 -}
   4.433 -
   4.434 -int
   4.435 -xendebug_read_memory(int xc_handle,
   4.436 -                     uint32_t domid,
   4.437 -                     uint32_t vcpu,
   4.438 -                     unsigned long address,
   4.439 -                     uint32_t length,
   4.440 -                     uint8_t *data)
   4.441 -{
   4.442 -    domain_context_p ctxt;
   4.443 -
   4.444 -    xendebug_initialize();
   4.445 -
   4.446 -    ctxt = xendebug_get_context(xc_handle, domid, vcpu);
   4.447 -
   4.448 -    xendebug_memory_op(ctxt, xc_handle, vcpu, PROT_READ, 
   4.449 -                       address, length, data);
   4.450 -
   4.451 -    return 0;
   4.452 -}
   4.453 -
   4.454 -int
   4.455 -xendebug_write_memory(int xc_handle,
   4.456 -                      uint32_t domid,
   4.457 -                      uint32_t vcpu,
   4.458 -                      unsigned long address,
   4.459 -                      uint32_t length,
   4.460 -                      uint8_t *data)
   4.461 -{
   4.462 -    domain_context_p ctxt;
   4.463 -
   4.464 -    xendebug_initialize();
   4.465 -
   4.466 -    ctxt = xendebug_get_context(xc_handle, domid, vcpu);
   4.467 -    xendebug_memory_op(ctxt, xc_handle, vcpu, PROT_READ | PROT_WRITE,
   4.468 -
   4.469 -                       address, length, data);
   4.470 -
   4.471 -    return 0;
   4.472 -}
   4.473 -
   4.474 -int
   4.475 -xendebug_insert_memory_breakpoint(int xc_handle,
   4.476 -                                  uint32_t domid,
   4.477 -                                  uint32_t vcpu,
   4.478 -                                  unsigned long address,
   4.479 -                                  uint32_t length)
   4.480 -{
   4.481 -    bwcpoint_p bkpt;
   4.482 -    uint8_t breakpoint_opcode = 0xcc;
   4.483 -
   4.484 -    printf("insert breakpoint %d:%lx %d\n",
   4.485 -            domid, address, length);
   4.486 -
   4.487 -    xendebug_initialize();
   4.488 -
   4.489 -    bkpt = malloc(sizeof(bwcpoint_t));
   4.490 -    if ( bkpt == NULL )
   4.491 -    {
   4.492 -        printf("error: breakpoint length should be 1\n");
   4.493 -        return -1;
   4.494 -    }
   4.495 -
   4.496 -    if ( length != 1 )
   4.497 -    {
   4.498 -        printf("error: breakpoint length should be 1\n");
   4.499 -        free(bkpt);
   4.500 -        return -1;
   4.501 -    }
   4.502 -
   4.503 -    bkpt->address = address;
   4.504 -    bkpt->domain  = domid;
   4.505 -
   4.506 -    xendebug_read_memory(xc_handle, domid, vcpu, address, 1,
   4.507 -                         &bkpt->old_value);
   4.508 -
   4.509 -    xendebug_write_memory(xc_handle, domid, vcpu, address, 1, 
   4.510 -                          &breakpoint_opcode);
   4.511 -    
   4.512 -    list_add(&bkpt->list, &bwcpoint_list.list);
   4.513 -
   4.514 -    printf("breakpoint_set %d:%lx 0x%x\n",
   4.515 -           domid, address, bkpt->old_value);
   4.516 -
   4.517 -    return 0;
   4.518 -}
   4.519 -
   4.520 -int
   4.521 -xendebug_remove_memory_breakpoint(int xc_handle,
   4.522 -                                  uint32_t domid,
   4.523 -                                  uint32_t vcpu,
   4.524 -                                  unsigned long address,
   4.525 -                                  uint32_t length)
   4.526 -{
   4.527 -    bwcpoint_p bkpt = NULL;
   4.528 -
   4.529 -    printf ("remove breakpoint %d:%lx\n",
   4.530 -            domid, address);
   4.531 -
   4.532 -    struct list_head *entry;
   4.533 -    list_for_each(entry, &bwcpoint_list.list)
   4.534 -    {
   4.535 -        bkpt = list_entry(entry, bwcpoint_t, list);
   4.536 -        if ( domid == bkpt->domain && address == bkpt->address )
   4.537 -            break;
   4.538 -    }
   4.539 -    
   4.540 -    if (bkpt == &bwcpoint_list || bkpt == NULL)
   4.541 -    {
   4.542 -        printf ("error: no breakpoint found\n");
   4.543 -        return -1;
   4.544 -    }
   4.545 -
   4.546 -    list_del(&bkpt->list);
   4.547 -
   4.548 -    xendebug_write_memory(xc_handle, domid, vcpu, address, 1, 
   4.549 -                          &bkpt->old_value);
   4.550 -
   4.551 -    free(bkpt);
   4.552 -    return 0;
   4.553 -}
   4.554 -
   4.555 -int
   4.556 -xendebug_query_domain_stop(int xc_handle, int *dom_list, int dom_list_size)
   4.557 -{
   4.558 -    xc_dominfo_t *info;
   4.559 -    uint32_t first_dom = 0;
   4.560 -    int max_doms = 1024;
   4.561 -    int nr_doms, loop;
   4.562 -    int count = 0;
   4.563 -
   4.564 -    if ( (info = malloc(max_doms * sizeof(xc_dominfo_t))) == NULL )
   4.565 -        return -ENOMEM;
   4.566 -
   4.567 -    nr_doms = xc_domain_getinfo(xc_handle, first_dom, max_doms, info);
   4.568 -
   4.569 -    for (loop = 0; loop < nr_doms; loop++)
   4.570 -    {
   4.571 -        printf ("domid: %d", info[loop].domid);
   4.572 -        printf (" %c%c%c%c%c%c",
   4.573 -                info[loop].dying ? 'D' : '-',
   4.574 -                info[loop].crashed ? 'C' : '-',
   4.575 -                info[loop].shutdown ? 'S' : '-',
   4.576 -                info[loop].paused ? 'P' : '-',
   4.577 -                info[loop].blocked ? 'B' : '-',
   4.578 -                info[loop].running ? 'R' : '-');
   4.579 -        printf (" pages: %ld, vcpus %d", 
   4.580 -                info[loop].nr_pages, info[loop].vcpus);
   4.581 -        printf ("\n");
   4.582 -
   4.583 -        if ( info[loop].paused && count < dom_list_size)
   4.584 -        {
   4.585 -            dom_list[count++] = info[loop].domid;
   4.586 -        }
   4.587 -    }
   4.588 -
   4.589 -    free(info);
   4.590 -
   4.591 -    return count;
   4.592 -}
   4.593 -
   4.594 -/*
   4.595 - * Local variables:
   4.596 - * mode: C
   4.597 - * c-set-style: "BSD"
   4.598 - * c-basic-offset: 4
   4.599 - * tab-width: 4
   4.600 - * indent-tabs-mode: nil
   4.601 - * End:
   4.602 - */
     5.1 --- a/tools/debugger/libxendebug/xendebug.h	Fri Aug 25 10:39:24 2006 +0100
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,78 +0,0 @@
     5.4 -/*
     5.5 - * xendebug.h
     5.6 - *
     5.7 - * alex ho
     5.8 - * http://www.cl.cam.ac.uk/netos/pdb
     5.9 - *
    5.10 - */
    5.11 -
    5.12 -#ifndef _XENDEBUG_H_DEFINED
    5.13 -#define _XENDEBUG_H_DEFINED
    5.14 -
    5.15 -#include <xenctrl.h>
    5.16 -
    5.17 -int xendebug_attach(int xc_handle,
    5.18 -		    uint32_t domid,
    5.19 -		    uint32_t vcpu);
    5.20 -
    5.21 -int xendebug_detach(int xc_handle,
    5.22 -		    uint32_t domid,
    5.23 -		    uint32_t vcpu);
    5.24 -
    5.25 -int xendebug_read_registers(int xc_handle,
    5.26 -			    uint32_t domid,
    5.27 -			    uint32_t vcpu,
    5.28 -			    cpu_user_regs_t **regs);
    5.29 -
    5.30 -int xendebug_read_fpregisters (int xc_handle,
    5.31 -			       uint32_t domid,
    5.32 -			       uint32_t vcpu,
    5.33 -			       char **regs);
    5.34 -
    5.35 -int xendebug_write_registers(int xc_handle,
    5.36 -			     uint32_t domid,
    5.37 -			     uint32_t vcpu,
    5.38 -			     cpu_user_regs_t *regs);
    5.39 -
    5.40 -int xendebug_step(int xc_handle,
    5.41 -		  uint32_t domid,
    5.42 -		  uint32_t vcpu);
    5.43 -
    5.44 -int xendebug_continue(int xc_handle,
    5.45 -		      uint32_t domid,
    5.46 -		      uint32_t vcpu);
    5.47 -
    5.48 -int xendebug_read_memory(int xc_handle,
    5.49 -			 uint32_t domid,
    5.50 -			 uint32_t vcpu,
    5.51 -			 unsigned long address,
    5.52 -			 uint32_t length,
    5.53 -			 uint8_t *data);
    5.54 -
    5.55 -
    5.56 -int xendebug_write_memory(int xc_handle,
    5.57 -			  uint32_t domid,
    5.58 -			  uint32_t vcpu,
    5.59 -			  unsigned long address,
    5.60 -			  uint32_t length,
    5.61 -			  uint8_t *data);
    5.62 -
    5.63 -
    5.64 -int xendebug_insert_memory_breakpoint(int xc_handle,
    5.65 -				      uint32_t domid,
    5.66 -				      uint32_t vcpu,
    5.67 -				      unsigned long address,
    5.68 -				      uint32_t length);
    5.69 -
    5.70 -int xendebug_remove_memory_breakpoint(int xc_handle,
    5.71 -				      uint32_t domid,
    5.72 -				      uint32_t vcpu,
    5.73 -				      unsigned long address,
    5.74 -				      uint32_t length);
    5.75 -
    5.76 -int xendebug_query_domain_stop(int xc_handle,
    5.77 -			       int *dom_list, 
    5.78 -			       int dom_list_size);
    5.79 -
    5.80 -
    5.81 -#endif /* _XENDEBUG_H_DEFINED */
     6.1 --- a/tools/libxc/ia64/xc_ia64_hvm_build.c	Fri Aug 25 10:39:24 2006 +0100
     6.2 +++ b/tools/libxc/ia64/xc_ia64_hvm_build.c	Fri Aug 25 18:39:10 2006 +0100
     6.3 @@ -555,7 +555,7 @@ setup_guest(int xc_handle, uint32_t dom,
     6.4      shared_iopage_t *sp;
     6.5      int i;
     6.6      unsigned long dom_memsize = (memsize << 20);
     6.7 -    DECLARE_DOM0_OP;
     6.8 +    DECLARE_DOMCTL;
     6.9  
    6.10      if ((image_size > 12 * MEM_M) || (image_size & (PAGE_SIZE - 1))) {
    6.11          PERROR("Guest firmware size is incorrect [%ld]?", image_size);
    6.12 @@ -563,13 +563,12 @@ setup_guest(int xc_handle, uint32_t dom,
    6.13      }
    6.14  
    6.15      /* This will creates the physmap.  */
    6.16 -    op.u.domain_setup.flags = XEN_DOMAINSETUP_hvm_guest;
    6.17 -    op.u.domain_setup.domain = (domid_t)dom;
    6.18 -    op.u.domain_setup.bp = 0;
    6.19 -    op.u.domain_setup.maxmem = 0;
    6.20 -    
    6.21 -    op.cmd = DOM0_DOMAIN_SETUP;
    6.22 -    if (xc_dom0_op(xc_handle, &op))
    6.23 +    domctl.u.arch_setup.flags = XEN_DOMAINSETUP_hvm_guest;
    6.24 +    domctl.u.arch_setup.bp = 0;
    6.25 +    domctl.u.arch_setup.maxmem = 0;
    6.26 +    domctl.cmd = XEN_DOMCTL_arch_setup;
    6.27 +    domctl.domain = (domid_t)dom;
    6.28 +    if (xc_domctl(xc_handle, &domctl))
    6.29          goto error_out;
    6.30  
    6.31      /* Load guest firmware */
    6.32 @@ -631,7 +630,7 @@ xc_hvm_build(int xc_handle, uint32_t dom
    6.33               unsigned int acpi, unsigned int apic, unsigned int store_evtchn,
    6.34               unsigned long *store_mfn)
    6.35  {
    6.36 -    dom0_op_t launch_op, op;
    6.37 +    struct xen_domctl launch_domctl, domctl;
    6.38      int rc;
    6.39      vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
    6.40      char *image = NULL;
    6.41 @@ -657,10 +656,10 @@ xc_hvm_build(int xc_handle, uint32_t dom
    6.42          return 1;
    6.43      }
    6.44  
    6.45 -    op.cmd = DOM0_GETDOMAININFO;
    6.46 -    op.u.getdomaininfo.domain = (domid_t)domid;
    6.47 -    if (do_dom0_op(xc_handle, &op) < 0 ||
    6.48 -        (uint16_t)op.u.getdomaininfo.domain != domid) {
    6.49 +    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    6.50 +    domctl.domain = (domid_t)domid;
    6.51 +    if (do_domctl(xc_handle, &domctl) < 0 ||
    6.52 +        (uint16_t)domctl.domain != domid) {
    6.53          PERROR("Could not get info on domain");
    6.54          goto error_out;
    6.55      }
    6.56 @@ -677,14 +676,14 @@ xc_hvm_build(int xc_handle, uint32_t dom
    6.57  
    6.58      ctxt->user_regs.cr_iip = 0x80000000ffffffb0UL;
    6.59  
    6.60 -    memset(&launch_op, 0, sizeof(launch_op));
    6.61 +    memset(&launch_domctl, 0, sizeof(launch_domctl));
    6.62  
    6.63 -    launch_op.u.setvcpucontext.domain = (domid_t)domid;
    6.64 -    launch_op.u.setvcpucontext.vcpu = 0;
    6.65 -    set_xen_guest_handle(launch_op.u.setvcpucontext.ctxt, ctxt);
    6.66 +    launch_domctl.domain = (domid_t)domid;
    6.67 +    launch_domctl.u.vcpucontext.vcpu = 0;
    6.68 +    set_xen_guest_handle(launch_domctl.u.vcpucontext.ctxt, ctxt);
    6.69  
    6.70 -    launch_op.cmd = DOM0_SETVCPUCONTEXT;
    6.71 -    rc = do_dom0_op(xc_handle, &launch_op);
    6.72 +    launch_domctl.cmd = XEN_DOMCTL_setvcpucontext;
    6.73 +    rc = do_domctl(xc_handle, &launch_domctl);
    6.74      return rc;
    6.75  
    6.76  error_out:
     7.1 --- a/tools/libxc/ia64/xc_ia64_linux_restore.c	Fri Aug 25 10:39:24 2006 +0100
     7.2 +++ b/tools/libxc/ia64/xc_ia64_linux_restore.c	Fri Aug 25 18:39:10 2006 +0100
     7.3 @@ -61,7 +61,7 @@ xc_linux_restore(int xc_handle, int io_f
     7.4                   unsigned long *store_mfn, unsigned int console_evtchn,
     7.5                   unsigned long *console_mfn)
     7.6  {
     7.7 -    DECLARE_DOM0_OP;
     7.8 +    DECLARE_DOMCTL;
     7.9      int rc = 1, i;
    7.10      unsigned long mfn, pfn;
    7.11      unsigned long ver;
    7.12 @@ -94,19 +94,19 @@ xc_linux_restore(int xc_handle, int io_f
    7.13      }
    7.14  
    7.15      if (mlock(&ctxt, sizeof(ctxt))) {
    7.16 -        /* needed for build dom0 op, but might as well do early */
    7.17 +        /* needed for build domctl, but might as well do early */
    7.18          ERR("Unable to mlock ctxt");
    7.19          return 1;
    7.20      }
    7.21  
    7.22      /* Get the domain's shared-info frame. */
    7.23 -    op.cmd = DOM0_GETDOMAININFO;
    7.24 -    op.u.getdomaininfo.domain = (domid_t)dom;
    7.25 -    if (xc_dom0_op(xc_handle, &op) < 0) {
    7.26 +    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    7.27 +    domctl.domain = (domid_t)dom;
    7.28 +    if (xc_domctl(xc_handle, &domctl) < 0) {
    7.29          ERR("Could not get information on new domain");
    7.30          goto out;
    7.31      }
    7.32 -    shared_info_frame = op.u.getdomaininfo.shared_info_frame;
    7.33 +    shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;
    7.34  
    7.35      if (xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(max_pfn)) != 0) {
    7.36          errno = ENOMEM;
    7.37 @@ -122,20 +122,20 @@ xc_linux_restore(int xc_handle, int io_f
    7.38  
    7.39      DPRINTF("Increased domain reservation by %ld KB\n", PFN_TO_KB(max_pfn));
    7.40  
    7.41 -    if (!read_exact(io_fd, &op.u.domain_setup, sizeof(op.u.domain_setup))) {
    7.42 +    if (!read_exact(io_fd, &domctl.u.arch_setup, sizeof(domctl.u.arch_setup))) {
    7.43          ERR("read: domain setup");
    7.44          goto out;
    7.45      }
    7.46  
    7.47      /* Build firmware (will be overwritten).  */
    7.48 -    op.u.domain_setup.domain = (domid_t)dom;
    7.49 -    op.u.domain_setup.flags &= ~XEN_DOMAINSETUP_query;
    7.50 -    op.u.domain_setup.bp = ((nr_pfns - 3) << PAGE_SHIFT)
    7.51 +    domctl.domain = (domid_t)dom;
    7.52 +    domctl.u.arch_setup.flags &= ~XEN_DOMAINSETUP_query;
    7.53 +    domctl.u.arch_setup.bp = ((nr_pfns - 3) << PAGE_SHIFT)
    7.54                             + sizeof (start_info_t);
    7.55 -    op.u.domain_setup.maxmem = (nr_pfns - 3) << PAGE_SHIFT;
    7.56 +    domctl.u.arch_setup.maxmem = (nr_pfns - 3) << PAGE_SHIFT;
    7.57      
    7.58 -    op.cmd = DOM0_DOMAIN_SETUP;
    7.59 -    if (xc_dom0_op(xc_handle, &op))
    7.60 +    domctl.cmd = XEN_DOMCTL_arch_setup;
    7.61 +    if (xc_domctl(xc_handle, &domctl))
    7.62          goto out;
    7.63  
    7.64      /* Get pages.  */
    7.65 @@ -226,22 +226,22 @@ xc_linux_restore(int xc_handle, int io_f
    7.66      }
    7.67  
    7.68      /* First to initialize.  */
    7.69 -    op.cmd = DOM0_SETVCPUCONTEXT;
    7.70 -    op.u.setvcpucontext.domain = (domid_t)dom;
    7.71 -    op.u.setvcpucontext.vcpu   = 0;
    7.72 -    set_xen_guest_handle(op.u.setvcpucontext.ctxt, &ctxt);
    7.73 -    if (xc_dom0_op(xc_handle, &op) != 0) {
    7.74 +    domctl.cmd = XEN_DOMCTL_setvcpucontext;
    7.75 +    domctl.domain = (domid_t)dom;
    7.76 +    domctl.u.vcpucontext.vcpu   = 0;
    7.77 +    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt);
    7.78 +    if (xc_domctl(xc_handle, &domctl) != 0) {
    7.79  	    ERR("Couldn't set vcpu context");
    7.80  	    goto out;
    7.81      }
    7.82  
    7.83      /* Second to set registers...  */
    7.84      ctxt.flags = VGCF_EXTRA_REGS;
    7.85 -    op.cmd = DOM0_SETVCPUCONTEXT;
    7.86 -    op.u.setvcpucontext.domain = (domid_t)dom;
    7.87 -    op.u.setvcpucontext.vcpu   = 0;
    7.88 -    set_xen_guest_handle(op.u.setvcpucontext.ctxt, &ctxt);
    7.89 -    if (xc_dom0_op(xc_handle, &op) != 0) {
    7.90 +    domctl.cmd = XEN_DOMCTL_setvcpucontext;
    7.91 +    domctl.domain = (domid_t)dom;
    7.92 +    domctl.u.vcpucontext.vcpu   = 0;
    7.93 +    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt);
    7.94 +    if (xc_domctl(xc_handle, &domctl) != 0) {
    7.95  	    ERR("Couldn't set vcpu context");
    7.96  	    goto out;
    7.97      }
     8.1 --- a/tools/libxc/ia64/xc_ia64_linux_save.c	Fri Aug 25 10:39:24 2006 +0100
     8.2 +++ b/tools/libxc/ia64/xc_ia64_linux_save.c	Fri Aug 25 18:39:10 2006 +0100
     8.3 @@ -60,7 +60,7 @@ static int xc_ia64_shadow_control(int xc
     8.4                                    unsigned int sop,
     8.5                                    unsigned long *dirty_bitmap,
     8.6                                    unsigned long pages,
     8.7 -                                  xc_shadow_control_stats_t *stats)
     8.8 +                                  xc_shadow_op_stats_t *stats)
     8.9  {
    8.10      if (dirty_bitmap != NULL && pages > 0) {
    8.11          int i;
    8.12 @@ -137,7 +137,7 @@ int
    8.13  xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
    8.14                uint32_t max_factor, uint32_t flags, int (*suspend)(int))
    8.15  {
    8.16 -    DECLARE_DOM0_OP;
    8.17 +    DECLARE_DOMCTL;
    8.18      xc_dominfo_t info;
    8.19  
    8.20      int rc = 1;
    8.21 @@ -242,15 +242,15 @@ xc_linux_save(int xc_handle, int io_fd, 
    8.22          }
    8.23      }
    8.24  
    8.25 -    op.cmd = DOM0_DOMAIN_SETUP;
    8.26 -    op.u.domain_setup.domain = (domid_t)dom;
    8.27 -    op.u.domain_setup.flags = XEN_DOMAINSETUP_query;
    8.28 -    if (xc_dom0_op(xc_handle, &op) < 0) {
    8.29 +    domctl.cmd = XEN_DOMCTL_arch_setup;
    8.30 +    domctl.domain = (domid_t)dom;
    8.31 +    domctl.u.arch_setup.flags = XEN_DOMAINSETUP_query;
    8.32 +    if (xc_domctl(xc_handle, &domctl) < 0) {
    8.33          ERR("Could not get domain setup");
    8.34          goto out;
    8.35      }
    8.36 -    op.u.domain_setup.domain = 0;
    8.37 -    if (!write_exact(io_fd, &op.u.domain_setup, sizeof(op.u.domain_setup))) {
    8.38 +    if (!write_exact(io_fd, &domctl.u.arch_setup,
    8.39 +                     sizeof(domctl.u.arch_setup))) {
    8.40          ERR("write: domain setup");
    8.41          goto out;
    8.42      }
    8.43 @@ -259,7 +259,7 @@ xc_linux_save(int xc_handle, int io_fd, 
    8.44      if (live) {
    8.45  
    8.46          if (xc_ia64_shadow_control(xc_handle, dom,
    8.47 -                                   DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY,
    8.48 +                                   XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
    8.49                                     NULL, 0, NULL ) < 0) {
    8.50              ERR("Couldn't enable shadow mode");
    8.51              goto out;
    8.52 @@ -324,7 +324,7 @@ xc_linux_save(int xc_handle, int io_fd, 
    8.53             but this is fast enough for the moment. */
    8.54          if (!last_iter) {
    8.55              if (xc_ia64_shadow_control(xc_handle, dom,
    8.56 -                                       DOM0_SHADOW_CONTROL_OP_PEEK,
    8.57 +                                       XEN_DOMCTL_SHADOW_OP_PEEK,
    8.58                                         to_skip, max_pfn, NULL) != max_pfn) {
    8.59                  ERR("Error peeking shadow bitmap");
    8.60                  goto out;
    8.61 @@ -392,7 +392,7 @@ xc_linux_save(int xc_handle, int io_fd, 
    8.62  
    8.63              /* Pages to be sent are pages which were dirty.  */
    8.64              if (xc_ia64_shadow_control(xc_handle, dom,
    8.65 -                                       DOM0_SHADOW_CONTROL_OP_CLEAN,
    8.66 +                                       XEN_DOMCTL_SHADOW_OP_CLEAN,
    8.67                                         to_send, max_pfn, NULL ) != max_pfn) {
    8.68                  ERR("Error flushing shadow PT");
    8.69                  goto out;
    8.70 @@ -481,7 +481,7 @@ xc_linux_save(int xc_handle, int io_fd, 
    8.71   out:
    8.72  
    8.73      if (live) {
    8.74 -        if (xc_ia64_shadow_control(xc_handle, dom, DOM0_SHADOW_CONTROL_OP_OFF,
    8.75 +        if (xc_ia64_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_OFF,
    8.76                                     NULL, 0, NULL ) < 0) {
    8.77              DPRINTF("Warning - couldn't disable shadow mode");
    8.78          }
     9.1 --- a/tools/libxc/ia64/xc_ia64_stubs.c	Fri Aug 25 10:39:24 2006 +0100
     9.2 +++ b/tools/libxc/ia64/xc_ia64_stubs.c	Fri Aug 25 18:39:10 2006 +0100
     9.3 @@ -33,7 +33,7 @@ int
     9.4  xc_ia64_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
     9.5                       unsigned int start_page, unsigned int nr_pages)
     9.6  {
     9.7 -    dom0_op_t op;
     9.8 +    struct xen_domctl domctl;
     9.9      int num_pfns,ret;
    9.10      unsigned int __start_page, __nr_pages;
    9.11      unsigned long max_pfns;
    9.12 @@ -45,11 +45,11 @@ xc_ia64_get_pfn_list(int xc_handle, uint
    9.13    
    9.14      while (__nr_pages) {
    9.15          max_pfns = ((unsigned long)__start_page << 32) | __nr_pages;
    9.16 -        op.cmd = DOM0_GETMEMLIST;
    9.17 -        op.u.getmemlist.domain   = (domid_t)domid;
    9.18 -        op.u.getmemlist.max_pfns = max_pfns;
    9.19 -        op.u.getmemlist.num_pfns = 0;
    9.20 -        set_xen_guest_handle(op.u.getmemlist.buffer, __pfn_buf);
    9.21 +        domctl.cmd = XEN_DOMCTL_getmemlist;
    9.22 +        domctl.domain   = (domid_t)domid;
    9.23 +        domctl.u.getmemlist.max_pfns = max_pfns;
    9.24 +        domctl.u.getmemlist.num_pfns = 0;
    9.25 +        set_xen_guest_handle(domctl.u.getmemlist.buffer, __pfn_buf);
    9.26  
    9.27          if ((max_pfns != -1UL)
    9.28              && mlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t)) != 0) {
    9.29 @@ -57,7 +57,7 @@ xc_ia64_get_pfn_list(int xc_handle, uint
    9.30              return -1;
    9.31          }
    9.32  
    9.33 -        ret = do_dom0_op(xc_handle, &op);
    9.34 +        ret = do_domctl(xc_handle, &domctl);
    9.35  
    9.36          if (max_pfns != -1UL)
    9.37              (void)munlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t));
    9.38 @@ -65,7 +65,7 @@ xc_ia64_get_pfn_list(int xc_handle, uint
    9.39          if (max_pfns == -1UL)
    9.40              return 0;
    9.41          
    9.42 -        num_pfns = op.u.getmemlist.num_pfns;
    9.43 +        num_pfns = domctl.u.getmemlist.num_pfns;
    9.44          __start_page += num_pfns;
    9.45          __nr_pages -= num_pfns;
    9.46          __pfn_buf += num_pfns;
    9.47 @@ -89,10 +89,10 @@ xc_get_pfn_list(int xc_handle, uint32_t 
    9.48  long
    9.49  xc_get_max_pages(int xc_handle, uint32_t domid)
    9.50  {
    9.51 -    dom0_op_t op;
    9.52 -    op.cmd = DOM0_GETDOMAININFO;
    9.53 -    op.u.getdomaininfo.domain = (domid_t)domid;
    9.54 -    return (do_dom0_op(xc_handle, &op) < 0) ? -1 : op.u.getdomaininfo.max_pages;
    9.55 +    struct xen_domctl domctl;
    9.56 +    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    9.57 +    domctl.domain = (domid_t)domid;
    9.58 +    return (do_domctl(xc_handle, &domctl) < 0) ? -1 : domctl.u.getdomaininfo.max_pages;
    9.59  }
    9.60  
    9.61  /*
    10.1 --- a/tools/libxc/powerpc64/xc_linux_build.c	Fri Aug 25 10:39:24 2006 +0100
    10.2 +++ b/tools/libxc/powerpc64/xc_linux_build.c	Fri Aug 25 18:39:10 2006 +0100
    10.3 @@ -27,7 +27,6 @@
    10.4  #include <sys/types.h>
    10.5  #include <inttypes.h>
    10.6  
    10.7 -#include <xen/dom0_ops.h>
    10.8  #include <xen/memory.h>
    10.9  #include <xc_private.h>
   10.10  #include <xg_private.h>
    11.1 --- a/tools/libxc/xc_csched.c	Fri Aug 25 10:39:24 2006 +0100
    11.2 +++ b/tools/libxc/xc_csched.c	Fri Aug 25 18:39:10 2006 +0100
    11.3 @@ -15,36 +15,36 @@ int
    11.4  xc_sched_credit_domain_set(
    11.5      int xc_handle,
    11.6      uint32_t domid,
    11.7 -    struct sched_credit_adjdom *sdom)
    11.8 +    struct xen_domctl_sched_credit *sdom)
    11.9  {
   11.10 -    DECLARE_DOM0_OP;
   11.11 +    DECLARE_DOMCTL;
   11.12  
   11.13 -    op.cmd = DOM0_ADJUSTDOM;    
   11.14 -    op.u.adjustdom.domain = (domid_t) domid;
   11.15 -    op.u.adjustdom.sched_id = SCHED_CREDIT;
   11.16 -    op.u.adjustdom.direction = SCHED_INFO_PUT;
   11.17 -    op.u.adjustdom.u.credit = *sdom;
   11.18 +    domctl.cmd = XEN_DOMCTL_scheduler_op;
   11.19 +    domctl.domain = (domid_t) domid;
   11.20 +    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
   11.21 +    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
   11.22 +    domctl.u.scheduler_op.u.credit = *sdom;
   11.23  
   11.24 -    return do_dom0_op(xc_handle, &op);
   11.25 +    return do_domctl(xc_handle, &domctl);
   11.26  }
   11.27  
   11.28  int
   11.29  xc_sched_credit_domain_get(
   11.30      int xc_handle,
   11.31      uint32_t domid,
   11.32 -    struct sched_credit_adjdom *sdom)
   11.33 +    struct xen_domctl_sched_credit *sdom)
   11.34  {
   11.35 -    DECLARE_DOM0_OP;
   11.36 +    DECLARE_DOMCTL;
   11.37      int err;
   11.38  
   11.39 -    op.cmd = DOM0_ADJUSTDOM;    
   11.40 -    op.u.adjustdom.domain = (domid_t) domid;
   11.41 -    op.u.adjustdom.sched_id = SCHED_CREDIT;
   11.42 -    op.u.adjustdom.direction = SCHED_INFO_GET;
   11.43 +    domctl.cmd = XEN_DOMCTL_scheduler_op;
   11.44 +    domctl.domain = (domid_t) domid;
   11.45 +    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
   11.46 +    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
   11.47  
   11.48 -    err = do_dom0_op(xc_handle, &op);
   11.49 +    err = do_domctl(xc_handle, &domctl);
   11.50      if ( err == 0 )
   11.51 -        *sdom = op.u.adjustdom.u.credit;
   11.52 +        *sdom = domctl.u.scheduler_op.u.credit;
   11.53  
   11.54      return err;
   11.55  }
    12.1 --- a/tools/libxc/xc_domain.c	Fri Aug 25 10:39:24 2006 +0100
    12.2 +++ b/tools/libxc/xc_domain.c	Fri Aug 25 18:39:10 2006 +0100
    12.3 @@ -15,16 +15,16 @@ int xc_domain_create(int xc_handle,
    12.4                       uint32_t *pdomid)
    12.5  {
    12.6      int err;
    12.7 -    DECLARE_DOM0_OP;
    12.8 +    DECLARE_DOMCTL;
    12.9  
   12.10 -    op.cmd = DOM0_CREATEDOMAIN;
   12.11 -    op.u.createdomain.domain = (domid_t)*pdomid;
   12.12 -    op.u.createdomain.ssidref = ssidref;
   12.13 -    memcpy(op.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
   12.14 -    if ( (err = do_dom0_op(xc_handle, &op)) != 0 )
   12.15 +    domctl.cmd = XEN_DOMCTL_createdomain;
   12.16 +    domctl.domain = (domid_t)*pdomid;
   12.17 +    domctl.u.createdomain.ssidref = ssidref;
   12.18 +    memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
   12.19 +    if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
   12.20          return err;
   12.21  
   12.22 -    *pdomid = (uint16_t)op.u.createdomain.domain;
   12.23 +    *pdomid = (uint16_t)domctl.domain;
   12.24      return 0;
   12.25  }
   12.26  
   12.27 @@ -32,30 +32,30 @@ int xc_domain_create(int xc_handle,
   12.28  int xc_domain_pause(int xc_handle,
   12.29                      uint32_t domid)
   12.30  {
   12.31 -    DECLARE_DOM0_OP;
   12.32 -    op.cmd = DOM0_PAUSEDOMAIN;
   12.33 -    op.u.pausedomain.domain = (domid_t)domid;
   12.34 -    return do_dom0_op(xc_handle, &op);
   12.35 +    DECLARE_DOMCTL;
   12.36 +    domctl.cmd = XEN_DOMCTL_pausedomain;
   12.37 +    domctl.domain = (domid_t)domid;
   12.38 +    return do_domctl(xc_handle, &domctl);
   12.39  }
   12.40  
   12.41  
   12.42  int xc_domain_unpause(int xc_handle,
   12.43                        uint32_t domid)
   12.44  {
   12.45 -    DECLARE_DOM0_OP;
   12.46 -    op.cmd = DOM0_UNPAUSEDOMAIN;
   12.47 -    op.u.unpausedomain.domain = (domid_t)domid;
   12.48 -    return do_dom0_op(xc_handle, &op);
   12.49 +    DECLARE_DOMCTL;
   12.50 +    domctl.cmd = XEN_DOMCTL_unpausedomain;
   12.51 +    domctl.domain = (domid_t)domid;
   12.52 +    return do_domctl(xc_handle, &domctl);
   12.53  }
   12.54  
   12.55  
   12.56  int xc_domain_destroy(int xc_handle,
   12.57                        uint32_t domid)
   12.58  {
   12.59 -    DECLARE_DOM0_OP;
   12.60 -    op.cmd = DOM0_DESTROYDOMAIN;
   12.61 -    op.u.destroydomain.domain = (domid_t)domid;
   12.62 -    return do_dom0_op(xc_handle, &op);
   12.63 +    DECLARE_DOMCTL;
   12.64 +    domctl.cmd = XEN_DOMCTL_destroydomain;
   12.65 +    domctl.domain = (domid_t)domid;
   12.66 +    return do_domctl(xc_handle, &domctl);
   12.67  }
   12.68  
   12.69  int xc_domain_shutdown(int xc_handle,
   12.70 @@ -90,14 +90,62 @@ int xc_domain_shutdown(int xc_handle,
   12.71  int xc_vcpu_setaffinity(int xc_handle,
   12.72                          uint32_t domid,
   12.73                          int vcpu,
   12.74 -                        cpumap_t cpumap)
   12.75 +                        uint64_t cpumap)
   12.76  {
   12.77 -    DECLARE_DOM0_OP;
   12.78 -    op.cmd = DOM0_SETVCPUAFFINITY;
   12.79 -    op.u.setvcpuaffinity.domain  = (domid_t)domid;
   12.80 -    op.u.setvcpuaffinity.vcpu    = vcpu;
   12.81 -    op.u.setvcpuaffinity.cpumap  = cpumap;
   12.82 -    return do_dom0_op(xc_handle, &op);
   12.83 +    DECLARE_DOMCTL;
   12.84 +    int ret = -1;
   12.85 +
   12.86 +    domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
   12.87 +    domctl.domain = (domid_t)domid;
   12.88 +    domctl.u.vcpuaffinity.vcpu    = vcpu;
   12.89 +
   12.90 +    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap,
   12.91 +                         (uint8_t *)&cpumap);
   12.92 +    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
   12.93 +    
   12.94 +    if ( mlock(&cpumap, sizeof(cpumap)) != 0 )
   12.95 +    {
   12.96 +        PERROR("Could not lock memory for Xen hypercall");
   12.97 +        goto out;
   12.98 +    }
   12.99 +
  12.100 +    ret = do_domctl(xc_handle, &domctl);
  12.101 +
  12.102 +    safe_munlock(&cpumap, sizeof(cpumap));
  12.103 +
  12.104 + out:
  12.105 +    return ret;
  12.106 +}
  12.107 +
  12.108 +
  12.109 +int xc_vcpu_getaffinity(int xc_handle,
  12.110 +                        uint32_t domid,
  12.111 +                        int vcpu,
  12.112 +                        uint64_t *cpumap)
  12.113 +{
  12.114 +    DECLARE_DOMCTL;
  12.115 +    int ret = -1;
  12.116 +
  12.117 +    domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
  12.118 +    domctl.domain = (domid_t)domid;
  12.119 +    domctl.u.vcpuaffinity.vcpu = vcpu;
  12.120 +
  12.121 +    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap,
  12.122 +                         (uint8_t *)cpumap);
  12.123 +    domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(*cpumap) * 8;
  12.124 +    
  12.125 +    if ( mlock(cpumap, sizeof(*cpumap)) != 0 )
  12.126 +    {
  12.127 +        PERROR("Could not lock memory for Xen hypercall");
  12.128 +        goto out;
  12.129 +    }
  12.130 +
  12.131 +    ret = do_domctl(xc_handle, &domctl);
  12.132 +
  12.133 +    safe_munlock(cpumap, sizeof(*cpumap));
  12.134 +
  12.135 + out:
  12.136 +    return ret;
  12.137  }
  12.138  
  12.139  
  12.140 @@ -108,27 +156,27 @@ int xc_domain_getinfo(int xc_handle,
  12.141  {
  12.142      unsigned int nr_doms;
  12.143      uint32_t next_domid = first_domid;
  12.144 -    DECLARE_DOM0_OP;
  12.145 +    DECLARE_DOMCTL;
  12.146      int rc = 0;
  12.147  
  12.148      memset(info, 0, max_doms*sizeof(xc_dominfo_t));
  12.149  
  12.150      for ( nr_doms = 0; nr_doms < max_doms; nr_doms++ )
  12.151      {
  12.152 -        op.cmd = DOM0_GETDOMAININFO;
  12.153 -        op.u.getdomaininfo.domain = (domid_t)next_domid;
  12.154 -        if ( (rc = do_dom0_op(xc_handle, &op)) < 0 )
  12.155 +        domctl.cmd = XEN_DOMCTL_getdomaininfo;
  12.156 +        domctl.domain = (domid_t)next_domid;
  12.157 +        if ( (rc = do_domctl(xc_handle, &domctl)) < 0 )
  12.158              break;
  12.159 -        info->domid      = (uint16_t)op.u.getdomaininfo.domain;
  12.160 +        info->domid      = (uint16_t)domctl.domain;
  12.161  
  12.162 -        info->dying    = !!(op.u.getdomaininfo.flags & DOMFLAGS_DYING);
  12.163 -        info->shutdown = !!(op.u.getdomaininfo.flags & DOMFLAGS_SHUTDOWN);
  12.164 -        info->paused   = !!(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED);
  12.165 -        info->blocked  = !!(op.u.getdomaininfo.flags & DOMFLAGS_BLOCKED);
  12.166 -        info->running  = !!(op.u.getdomaininfo.flags & DOMFLAGS_RUNNING);
  12.167 +        info->dying    = !!(domctl.u.getdomaininfo.flags & DOMFLAGS_DYING);
  12.168 +        info->shutdown = !!(domctl.u.getdomaininfo.flags & DOMFLAGS_SHUTDOWN);
  12.169 +        info->paused   = !!(domctl.u.getdomaininfo.flags & DOMFLAGS_PAUSED);
  12.170 +        info->blocked  = !!(domctl.u.getdomaininfo.flags & DOMFLAGS_BLOCKED);
  12.171 +        info->running  = !!(domctl.u.getdomaininfo.flags & DOMFLAGS_RUNNING);
  12.172  
  12.173          info->shutdown_reason =
  12.174 -            (op.u.getdomaininfo.flags>>DOMFLAGS_SHUTDOWNSHIFT) &
  12.175 +            (domctl.u.getdomaininfo.flags>>DOMFLAGS_SHUTDOWNSHIFT) &
  12.176              DOMFLAGS_SHUTDOWNMASK;
  12.177  
  12.178          if ( info->shutdown && (info->shutdown_reason == SHUTDOWN_crash) )
  12.179 @@ -137,18 +185,18 @@ int xc_domain_getinfo(int xc_handle,
  12.180              info->crashed  = 1;
  12.181          }
  12.182  
  12.183 -        info->ssidref  = op.u.getdomaininfo.ssidref;
  12.184 -        info->nr_pages = op.u.getdomaininfo.tot_pages;
  12.185 -        info->max_memkb = op.u.getdomaininfo.max_pages << (PAGE_SHIFT - 10);
  12.186 -        info->shared_info_frame = op.u.getdomaininfo.shared_info_frame;
  12.187 -        info->cpu_time = op.u.getdomaininfo.cpu_time;
  12.188 -        info->nr_online_vcpus = op.u.getdomaininfo.nr_online_vcpus;
  12.189 -        info->max_vcpu_id = op.u.getdomaininfo.max_vcpu_id;
  12.190 +        info->ssidref  = domctl.u.getdomaininfo.ssidref;
  12.191 +        info->nr_pages = domctl.u.getdomaininfo.tot_pages;
  12.192 +        info->max_memkb = domctl.u.getdomaininfo.max_pages << (PAGE_SHIFT-10);
  12.193 +        info->shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;
  12.194 +        info->cpu_time = domctl.u.getdomaininfo.cpu_time;
  12.195 +        info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
  12.196 +        info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
  12.197  
  12.198 -        memcpy(info->handle, op.u.getdomaininfo.handle,
  12.199 +        memcpy(info->handle, domctl.u.getdomaininfo.handle,
  12.200                 sizeof(xen_domain_handle_t));
  12.201  
  12.202 -        next_domid = (uint16_t)op.u.getdomaininfo.domain + 1;
  12.203 +        next_domid = (uint16_t)domctl.domain + 1;
  12.204          info++;
  12.205      }
  12.206  
  12.207 @@ -163,20 +211,20 @@ int xc_domain_getinfolist(int xc_handle,
  12.208                            xc_domaininfo_t *info)
  12.209  {
  12.210      int ret = 0;
  12.211 -    DECLARE_DOM0_OP;
  12.212 +    DECLARE_SYSCTL;
  12.213  
  12.214      if ( mlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
  12.215          return -1;
  12.216  
  12.217 -    op.cmd = DOM0_GETDOMAININFOLIST;
  12.218 -    op.u.getdomaininfolist.first_domain = first_domain;
  12.219 -    op.u.getdomaininfolist.max_domains  = max_domains;
  12.220 -    set_xen_guest_handle(op.u.getdomaininfolist.buffer, info);
  12.221 +    sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
  12.222 +    sysctl.u.getdomaininfolist.first_domain = first_domain;
  12.223 +    sysctl.u.getdomaininfolist.max_domains  = max_domains;
  12.224 +    set_xen_guest_handle(sysctl.u.getdomaininfolist.buffer, info);
  12.225  
  12.226 -    if ( xc_dom0_op(xc_handle, &op) < 0 )
  12.227 +    if ( xc_sysctl(xc_handle, &sysctl) < 0 )
  12.228          ret = -1;
  12.229      else
  12.230 -        ret = op.u.getdomaininfolist.num_domains;
  12.231 +        ret = sysctl.u.getdomaininfolist.num_domains;
  12.232  
  12.233      if ( munlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
  12.234          ret = -1;
  12.235 @@ -190,17 +238,17 @@ int xc_vcpu_getcontext(int xc_handle,
  12.236                                 vcpu_guest_context_t *ctxt)
  12.237  {
  12.238      int rc;
  12.239 -    DECLARE_DOM0_OP;
  12.240 +    DECLARE_DOMCTL;
  12.241  
  12.242 -    op.cmd = DOM0_GETVCPUCONTEXT;
  12.243 -    op.u.getvcpucontext.domain = (domid_t)domid;
  12.244 -    op.u.getvcpucontext.vcpu   = (uint16_t)vcpu;
  12.245 -    set_xen_guest_handle(op.u.getvcpucontext.ctxt, ctxt);
  12.246 +    domctl.cmd = XEN_DOMCTL_getvcpucontext;
  12.247 +    domctl.domain = (domid_t)domid;
  12.248 +    domctl.u.vcpucontext.vcpu   = (uint16_t)vcpu;
  12.249 +    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
  12.250  
  12.251      if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 )
  12.252          return rc;
  12.253  
  12.254 -    rc = do_dom0_op(xc_handle, &op);
  12.255 +    rc = do_domctl(xc_handle, &domctl);
  12.256  
  12.257      safe_munlock(ctxt, sizeof(*ctxt));
  12.258  
  12.259 @@ -215,28 +263,28 @@ int xc_shadow_control(int xc_handle,
  12.260                        unsigned long pages,
  12.261                        unsigned long *mb,
  12.262                        uint32_t mode,
  12.263 -                      xc_shadow_control_stats_t *stats)
  12.264 +                      xc_shadow_op_stats_t *stats)
  12.265  {
  12.266      int rc;
  12.267 -    DECLARE_DOM0_OP;
  12.268 -    op.cmd = DOM0_SHADOW_CONTROL;
  12.269 -    op.u.shadow_control.domain = (domid_t)domid;
  12.270 -    op.u.shadow_control.op     = sop;
  12.271 -    op.u.shadow_control.pages  = pages;
  12.272 -    op.u.shadow_control.mb     = mb ? *mb : 0;
  12.273 -    op.u.shadow_control.mode   = mode;
  12.274 -    set_xen_guest_handle(op.u.shadow_control.dirty_bitmap, dirty_bitmap);
  12.275 +    DECLARE_DOMCTL;
  12.276 +    domctl.cmd = XEN_DOMCTL_shadow_op;
  12.277 +    domctl.domain = (domid_t)domid;
  12.278 +    domctl.u.shadow_op.op     = sop;
  12.279 +    domctl.u.shadow_op.pages  = pages;
  12.280 +    domctl.u.shadow_op.mb     = mb ? *mb : 0;
  12.281 +    domctl.u.shadow_op.mode   = mode;
  12.282 +    set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap, dirty_bitmap);
  12.283  
  12.284 -    rc = do_dom0_op(xc_handle, &op);
  12.285 +    rc = do_domctl(xc_handle, &domctl);
  12.286  
  12.287      if ( stats )
  12.288 -        memcpy(stats, &op.u.shadow_control.stats,
  12.289 -               sizeof(xc_shadow_control_stats_t));
  12.290 +        memcpy(stats, &domctl.u.shadow_op.stats,
  12.291 +               sizeof(xc_shadow_op_stats_t));
  12.292      
  12.293      if ( mb ) 
  12.294 -        *mb = op.u.shadow_control.mb;
  12.295 +        *mb = domctl.u.shadow_op.mb;
  12.296  
  12.297 -    return (rc == 0) ? op.u.shadow_control.pages : rc;
  12.298 +    return (rc == 0) ? domctl.u.shadow_op.pages : rc;
  12.299  }
  12.300  
  12.301  int xc_domain_setcpuweight(int xc_handle,
  12.302 @@ -258,22 +306,22 @@ int xc_domain_setmaxmem(int xc_handle,
  12.303                          uint32_t domid,
  12.304                          unsigned int max_memkb)
  12.305  {
  12.306 -    DECLARE_DOM0_OP;
  12.307 -    op.cmd = DOM0_SETDOMAINMAXMEM;
  12.308 -    op.u.setdomainmaxmem.domain = (domid_t)domid;
  12.309 -    op.u.setdomainmaxmem.max_memkb = max_memkb;
  12.310 -    return do_dom0_op(xc_handle, &op);
  12.311 +    DECLARE_DOMCTL;
  12.312 +    domctl.cmd = XEN_DOMCTL_max_mem;
  12.313 +    domctl.domain = (domid_t)domid;
  12.314 +    domctl.u.max_mem.max_memkb = max_memkb;
  12.315 +    return do_domctl(xc_handle, &domctl);
  12.316  }
  12.317  
  12.318  int xc_domain_set_time_offset(int xc_handle,
  12.319                                uint32_t domid,
  12.320                                int32_t time_offset_seconds)
  12.321  {
  12.322 -    DECLARE_DOM0_OP;
  12.323 -    op.cmd = DOM0_SETTIMEOFFSET;
  12.324 -    op.u.settimeoffset.domain = (domid_t)domid;
  12.325 -    op.u.settimeoffset.time_offset_seconds = time_offset_seconds;
  12.326 -    return do_dom0_op(xc_handle, &op);
  12.327 +    DECLARE_DOMCTL;
  12.328 +    domctl.cmd = XEN_DOMCTL_settimeoffset;
  12.329 +    domctl.domain = (domid_t)domid;
  12.330 +    domctl.u.settimeoffset.time_offset_seconds = time_offset_seconds;
  12.331 +    return do_domctl(xc_handle, &domctl);
  12.332  }
  12.333  
  12.334  int xc_domain_memory_increase_reservation(int xc_handle,
  12.335 @@ -397,21 +445,22 @@ int xc_domain_translate_gpfn_list(int xc
  12.336  
  12.337  int xc_domain_max_vcpus(int xc_handle, uint32_t domid, unsigned int max)
  12.338  {
  12.339 -    DECLARE_DOM0_OP;
  12.340 -    op.cmd = DOM0_MAX_VCPUS;
  12.341 -    op.u.max_vcpus.domain = (domid_t)domid;
  12.342 -    op.u.max_vcpus.max    = max;
  12.343 -    return do_dom0_op(xc_handle, &op);
  12.344 +    DECLARE_DOMCTL;
  12.345 +    domctl.cmd = XEN_DOMCTL_max_vcpus;
  12.346 +    domctl.domain = (domid_t)domid;
  12.347 +    domctl.u.max_vcpus.max    = max;
  12.348 +    return do_domctl(xc_handle, &domctl);
  12.349  }
  12.350  
  12.351  int xc_domain_sethandle(int xc_handle, uint32_t domid,
  12.352                          xen_domain_handle_t handle)
  12.353  {
  12.354 -    DECLARE_DOM0_OP;
  12.355 -    op.cmd = DOM0_SETDOMAINHANDLE;
  12.356 -    op.u.setdomainhandle.domain = (domid_t)domid;
  12.357 -    memcpy(op.u.setdomainhandle.handle, handle, sizeof(xen_domain_handle_t));
  12.358 -    return do_dom0_op(xc_handle, &op);
  12.359 +    DECLARE_DOMCTL;
  12.360 +    domctl.cmd = XEN_DOMCTL_setdomainhandle;
  12.361 +    domctl.domain = (domid_t)domid;
  12.362 +    memcpy(domctl.u.setdomainhandle.handle, handle,
  12.363 +           sizeof(xen_domain_handle_t));
  12.364 +    return do_domctl(xc_handle, &domctl);
  12.365  }
  12.366  
  12.367  int xc_vcpu_getinfo(int xc_handle,
  12.368 @@ -420,14 +469,15 @@ int xc_vcpu_getinfo(int xc_handle,
  12.369                      xc_vcpuinfo_t *info)
  12.370  {
  12.371      int rc;
  12.372 -    DECLARE_DOM0_OP;
  12.373 -    op.cmd = DOM0_GETVCPUINFO;
  12.374 -    op.u.getvcpuinfo.domain = (domid_t)domid;
  12.375 -    op.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
  12.376 +    DECLARE_DOMCTL;
  12.377  
  12.378 -    rc = do_dom0_op(xc_handle, &op);
  12.379 +    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
  12.380 +    domctl.domain = (domid_t)domid;
  12.381 +    domctl.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
  12.382  
  12.383 -    memcpy(info, &op.u.getvcpuinfo, sizeof(*info));
  12.384 +    rc = do_domctl(xc_handle, &domctl);
  12.385 +
  12.386 +    memcpy(info, &domctl.u.getvcpuinfo, sizeof(*info));
  12.387  
  12.388      return rc;
  12.389  }
  12.390 @@ -438,15 +488,15 @@ int xc_domain_ioport_permission(int xc_h
  12.391                                  uint32_t nr_ports,
  12.392                                  uint32_t allow_access)
  12.393  {
  12.394 -    DECLARE_DOM0_OP;
  12.395 +    DECLARE_DOMCTL;
  12.396  
  12.397 -    op.cmd = DOM0_IOPORT_PERMISSION;
  12.398 -    op.u.ioport_permission.domain = (domid_t)domid;
  12.399 -    op.u.ioport_permission.first_port = first_port;
  12.400 -    op.u.ioport_permission.nr_ports = nr_ports;
  12.401 -    op.u.ioport_permission.allow_access = allow_access;
  12.402 +    domctl.cmd = XEN_DOMCTL_ioport_permission;
  12.403 +    domctl.domain = (domid_t)domid;
  12.404 +    domctl.u.ioport_permission.first_port = first_port;
  12.405 +    domctl.u.ioport_permission.nr_ports = nr_ports;
  12.406 +    domctl.u.ioport_permission.allow_access = allow_access;
  12.407  
  12.408 -    return do_dom0_op(xc_handle, &op);
  12.409 +    return do_domctl(xc_handle, &domctl);
  12.410  }
  12.411  
  12.412  int xc_vcpu_setcontext(int xc_handle,
  12.413 @@ -454,18 +504,18 @@ int xc_vcpu_setcontext(int xc_handle,
  12.414                         uint32_t vcpu,
  12.415                         vcpu_guest_context_t *ctxt)
  12.416  {
  12.417 -    dom0_op_t op;
  12.418 +    DECLARE_DOMCTL;
  12.419      int rc;
  12.420  
  12.421 -    op.cmd = DOM0_SETVCPUCONTEXT;
  12.422 -    op.u.setvcpucontext.domain = domid;
  12.423 -    op.u.setvcpucontext.vcpu = vcpu;
  12.424 -    set_xen_guest_handle(op.u.setvcpucontext.ctxt, ctxt);
  12.425 +    domctl.cmd = XEN_DOMCTL_setvcpucontext;
  12.426 +    domctl.domain = domid;
  12.427 +    domctl.u.vcpucontext.vcpu = vcpu;
  12.428 +    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
  12.429  
  12.430      if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 )
  12.431          return rc;
  12.432  
  12.433 -    rc = do_dom0_op(xc_handle, &op);
  12.434 +    rc = do_domctl(xc_handle, &domctl);
  12.435  
  12.436      safe_munlock(ctxt, sizeof(*ctxt));
  12.437  
  12.438 @@ -478,14 +528,14 @@ int xc_domain_irq_permission(int xc_hand
  12.439                               uint8_t pirq,
  12.440                               uint8_t allow_access)
  12.441  {
  12.442 -    dom0_op_t op;
  12.443 +    DECLARE_DOMCTL;
  12.444  
  12.445 -    op.cmd = DOM0_IRQ_PERMISSION;
  12.446 -    op.u.irq_permission.domain = domid;
  12.447 -    op.u.irq_permission.pirq = pirq;
  12.448 -    op.u.irq_permission.allow_access = allow_access;
  12.449 +    domctl.cmd = XEN_DOMCTL_irq_permission;
  12.450 +    domctl.domain = domid;
  12.451 +    domctl.u.irq_permission.pirq = pirq;
  12.452 +    domctl.u.irq_permission.allow_access = allow_access;
  12.453  
  12.454 -    return do_dom0_op(xc_handle, &op);
  12.455 +    return do_domctl(xc_handle, &domctl);
  12.456  }
  12.457  
  12.458  int xc_domain_iomem_permission(int xc_handle,
  12.459 @@ -494,15 +544,15 @@ int xc_domain_iomem_permission(int xc_ha
  12.460                                 unsigned long nr_mfns,
  12.461                                 uint8_t allow_access)
  12.462  {
  12.463 -    dom0_op_t op;
  12.464 +    DECLARE_DOMCTL;
  12.465  
  12.466 -    op.cmd = DOM0_IOMEM_PERMISSION;
  12.467 -    op.u.iomem_permission.domain = domid;
  12.468 -    op.u.iomem_permission.first_mfn = first_mfn;
  12.469 -    op.u.iomem_permission.nr_mfns = nr_mfns;
  12.470 -    op.u.iomem_permission.allow_access = allow_access;
  12.471 +    domctl.cmd = XEN_DOMCTL_iomem_permission;
  12.472 +    domctl.domain = domid;
  12.473 +    domctl.u.iomem_permission.first_mfn = first_mfn;
  12.474 +    domctl.u.iomem_permission.nr_mfns = nr_mfns;
  12.475 +    domctl.u.iomem_permission.allow_access = allow_access;
  12.476  
  12.477 -    return do_dom0_op(xc_handle, &op);
  12.478 +    return do_domctl(xc_handle, &domctl);
  12.479  }
  12.480  
  12.481  /*
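
xc_vcpu_setaffinity()/xc_vcpu_getaffinity() now take a plain uint64_t CPU mask, which the wrappers hand to Xen as a locked guest-handle bitmap of 64 bits. A small usage sketch, assuming the standard xc_interface_open()/xc_interface_close() session calls:

    #include <stdio.h>
    #include <xenctrl.h>

    /* Sketch: pin vcpu 0 of 'domid' to physical CPUs 0 and 1, then read
     * the affinity mask back.  Uses only the signatures shown above. */
    int pin_vcpu0_example(uint32_t domid)
    {
        int xc_handle = xc_interface_open();
        uint64_t cpumap = (1ULL << 0) | (1ULL << 1);

        if ( xc_handle < 0 )
            return -1;

        if ( xc_vcpu_setaffinity(xc_handle, domid, 0, cpumap) != 0 )
            goto fail;

        cpumap = 0;
        if ( xc_vcpu_getaffinity(xc_handle, domid, 0, &cpumap) != 0 )
            goto fail;

        printf("vcpu0 affinity mask: %#llx\n", (unsigned long long)cpumap);
        return xc_interface_close(xc_handle);

     fail:
        xc_interface_close(xc_handle);
        return -1;
    }
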
    13.1 --- a/tools/libxc/xc_hvm_build.c	Fri Aug 25 10:39:24 2006 +0100
    13.2 +++ b/tools/libxc/xc_hvm_build.c	Fri Aug 25 18:39:10 2006 +0100
    13.3 @@ -395,7 +395,7 @@ static int xc_hvm_build_internal(int xc_
    13.4                                   unsigned int store_evtchn,
    13.5                                   unsigned long *store_mfn)
    13.6  {
    13.7 -    dom0_op_t launch_op, op;
    13.8 +    struct xen_domctl launch_domctl, domctl;
    13.9      int rc, i;
   13.10      vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
   13.11      unsigned long nr_pages;
   13.12 @@ -432,21 +432,21 @@ static int xc_hvm_build_internal(int xc_
   13.13          return 1;
   13.14      }
   13.15  
   13.16 -    op.cmd = DOM0_GETDOMAININFO;
   13.17 -    op.u.getdomaininfo.domain = (domid_t)domid;
   13.18 -    if ( (xc_dom0_op(xc_handle, &op) < 0) ||
   13.19 -         ((uint16_t)op.u.getdomaininfo.domain != domid) )
   13.20 +    domctl.cmd = XEN_DOMCTL_getdomaininfo;
   13.21 +    domctl.domain = (domid_t)domid;
   13.22 +    if ( (xc_domctl(xc_handle, &domctl) < 0) ||
   13.23 +         ((uint16_t)domctl.domain != domid) )
   13.24      {
   13.25          PERROR("Could not get info on domain");
   13.26          goto error_out;
   13.27      }
   13.28  
   13.29      /* HVM domains must be put into shadow2 mode at the start of day */
   13.30 -    if ( xc_shadow_control(xc_handle, domid, DOM0_SHADOW_CONTROL_OP_ENABLE,
   13.31 +    if ( xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_ENABLE,
   13.32                             NULL, 0, NULL, 
   13.33 -                           DOM0_SHADOW_ENABLE_REFCOUNT  |
   13.34 -                           DOM0_SHADOW_ENABLE_TRANSLATE |
   13.35 -                           DOM0_SHADOW_ENABLE_EXTERNAL, 
   13.36 +                           XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT  |
   13.37 +                           XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
   13.38 +                           XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL, 
   13.39                             NULL) )
   13.40      {
   13.41          PERROR("Could not enable shadow paging for domain.\n");
   13.42 @@ -457,7 +457,7 @@ static int xc_hvm_build_internal(int xc_
   13.43  
   13.44      ctxt->flags = VGCF_HVM_GUEST;
   13.45      if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
   13.46 -                     ctxt, op.u.getdomaininfo.shared_info_frame,
   13.47 +                     ctxt, domctl.u.getdomaininfo.shared_info_frame,
   13.48                       vcpus, pae, acpi, apic, store_evtchn, store_mfn) < 0)
   13.49      {
   13.50          ERROR("Error constructing guest OS");
   13.51 @@ -495,14 +495,14 @@ static int xc_hvm_build_internal(int xc_
   13.52      ctxt->syscall_callback_eip  = 0;
   13.53  #endif
   13.54  
   13.55 -    memset( &launch_op, 0, sizeof(launch_op) );
   13.56 +    memset(&launch_domctl, 0, sizeof(launch_domctl));
   13.57  
   13.58 -    launch_op.u.setvcpucontext.domain = (domid_t)domid;
   13.59 -    launch_op.u.setvcpucontext.vcpu   = 0;
   13.60 -    set_xen_guest_handle(launch_op.u.setvcpucontext.ctxt, ctxt);
   13.61 +    launch_domctl.domain = (domid_t)domid;
   13.62 +    launch_domctl.u.vcpucontext.vcpu   = 0;
   13.63 +    set_xen_guest_handle(launch_domctl.u.vcpucontext.ctxt, ctxt);
   13.64  
   13.65 -    launch_op.cmd = DOM0_SETVCPUCONTEXT;
   13.66 -    rc = xc_dom0_op(xc_handle, &launch_op);
   13.67 +    launch_domctl.cmd = XEN_DOMCTL_setvcpucontext;
   13.68 +    rc = xc_domctl(xc_handle, &launch_domctl);
   13.69  
   13.70      return rc;
   13.71  
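
As the hunk above shows, an HVM guest must be switched into shadow2 mode before its memory is populated; only the constant names change, from DOM0_SHADOW_* to XEN_DOMCTL_SHADOW_*. The equivalent standalone call, using exactly the flags seen here:

    #include <xenctrl.h>

    /* Sketch: enable full shadow mode (refcount + translate + external)
     * for an HVM domain, as xc_hvm_build_internal() does above. */
    int enable_hvm_shadow(int xc_handle, uint32_t domid)
    {
        return xc_shadow_control(xc_handle, domid,
                                 XEN_DOMCTL_SHADOW_OP_ENABLE,
                                 NULL, 0, NULL,
                                 XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
                                 XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
                                 XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL,
                                 NULL);
    }
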
    14.1 --- a/tools/libxc/xc_linux_build.c	Fri Aug 25 10:39:24 2006 +0100
    14.2 +++ b/tools/libxc/xc_linux_build.c	Fri Aug 25 18:39:10 2006 +0100
    14.3 @@ -474,7 +474,7 @@ static int setup_guest(int xc_handle,
    14.4      struct xen_ia64_boot_param *bp;
    14.5      shared_info_t *shared_info;
    14.6      int i;
    14.7 -    DECLARE_DOM0_OP;
    14.8 +    DECLARE_DOMCTL;
    14.9      int rc;
   14.10  
   14.11      rc = probeimageformat(image, image_size, &load_funcs);
   14.12 @@ -494,14 +494,13 @@ static int setup_guest(int xc_handle,
   14.13      start_info_mpa = (nr_pages - 3) << PAGE_SHIFT;
   14.14  
   14.15      /* Build firmware.  */
   14.16 -    memset(&op.u.domain_setup, 0, sizeof(op.u.domain_setup));
   14.17 -    op.u.domain_setup.flags = 0;
   14.18 -    op.u.domain_setup.domain = (domid_t)dom;
   14.19 -    op.u.domain_setup.bp = start_info_mpa + sizeof (start_info_t);
   14.20 -    op.u.domain_setup.maxmem = (nr_pages - 3) << PAGE_SHIFT;
   14.21 -    
   14.22 -    op.cmd = DOM0_DOMAIN_SETUP;
   14.23 -    if ( xc_dom0_op(xc_handle, &op) )
   14.24 +    memset(&domctl.u.arch_setup, 0, sizeof(domctl.u.arch_setup));
   14.25 +    domctl.u.arch_setup.flags = 0;
   14.26 +    domctl.u.arch_setup.bp = start_info_mpa + sizeof (start_info_t);
   14.27 +    domctl.u.arch_setup.maxmem = (nr_pages - 3) << PAGE_SHIFT;
   14.28 +    domctl.cmd = XEN_DOMCTL_arch_setup;
   14.29 +    domctl.domain = (domid_t)dom;
   14.30 +    if ( xc_domctl(xc_handle, &domctl) )
   14.31          goto error_out;
   14.32  
   14.33      start_page = dsi.v_start >> PAGE_SHIFT;
   14.34 @@ -662,7 +661,7 @@ static int setup_guest(int xc_handle,
   14.35      shared_info_t *shared_info;
   14.36      xc_mmu_t *mmu = NULL;
   14.37      const char *p;
   14.38 -    DECLARE_DOM0_OP;
   14.39 +    DECLARE_DOMCTL;
   14.40      int rc;
   14.41  
   14.42      unsigned long nr_pt_pages;
   14.43 @@ -966,7 +965,7 @@ static int setup_guest(int xc_handle,
   14.44  
   14.45          /* Enable shadow translate mode */
   14.46          if ( xc_shadow_control(xc_handle, dom,
   14.47 -                               DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE,
   14.48 +                               XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE,
   14.49                                 NULL, 0, NULL, 0, NULL) < 0 )
   14.50          {
   14.51              PERROR("Could not enable translation mode");
   14.52 @@ -1077,11 +1076,11 @@ static int setup_guest(int xc_handle,
   14.53          unsigned long long pfn = (hypercall_page - dsi.v_start) >> PAGE_SHIFT;
   14.54          if ( pfn >= nr_pages )
   14.55              goto error_out;
   14.56 -        op.u.hypercall_init.domain = (domid_t)dom;
   14.57 -        op.u.hypercall_init.gmfn   = shadow_mode_enabled ?
   14.58 +        domctl.domain = (domid_t)dom;
   14.59 +        domctl.u.hypercall_init.gmfn   = shadow_mode_enabled ?
   14.60              pfn : page_array[pfn];
   14.61 -        op.cmd = DOM0_HYPERCALL_INIT;
   14.62 -        if ( xc_dom0_op(xc_handle, &op) )
   14.63 +        domctl.cmd = XEN_DOMCTL_hypercall_init;
   14.64 +        if ( xc_domctl(xc_handle, &domctl) )
   14.65              goto error_out;
   14.66      }
   14.67  
   14.68 @@ -1114,8 +1113,8 @@ static int xc_linux_build_internal(int x
   14.69                                     unsigned int console_evtchn,
   14.70                                     unsigned long *console_mfn)
   14.71  {
   14.72 -    dom0_op_t launch_op;
   14.73 -    DECLARE_DOM0_OP;
   14.74 +    struct xen_domctl launch_domctl;
   14.75 +    DECLARE_DOMCTL;
   14.76      int rc, i;
   14.77      vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
   14.78      unsigned long nr_pages;
   14.79 @@ -1147,10 +1146,10 @@ static int xc_linux_build_internal(int x
   14.80          return 1;
   14.81      }
   14.82  
   14.83 -    op.cmd = DOM0_GETDOMAININFO;
   14.84 -    op.u.getdomaininfo.domain = (domid_t)domid;
   14.85 -    if ( (xc_dom0_op(xc_handle, &op) < 0) ||
   14.86 -         ((uint16_t)op.u.getdomaininfo.domain != domid) )
   14.87 +    domctl.cmd = XEN_DOMCTL_getdomaininfo;
   14.88 +    domctl.domain = (domid_t)domid;
   14.89 +    if ( (xc_domctl(xc_handle, &domctl) < 0) ||
   14.90 +         ((uint16_t)domctl.domain != domid) )
   14.91      {
   14.92          PERROR("Could not get info on domain");
   14.93          goto error_out;
   14.94 @@ -1163,7 +1162,7 @@ static int xc_linux_build_internal(int x
   14.95                       nr_pages,
   14.96                       &vstartinfo_start, &vkern_entry,
   14.97                       &vstack_start, ctxt, cmdline,
   14.98 -                     op.u.getdomaininfo.shared_info_frame,
   14.99 +                     domctl.u.getdomaininfo.shared_info_frame,
  14.100                       flags, store_evtchn, store_mfn,
  14.101                       console_evtchn, console_mfn,
  14.102                       features_bitmap) < 0 )
  14.103 @@ -1239,14 +1238,14 @@ static int xc_linux_build_internal(int x
  14.104  #endif
  14.105  #endif /* x86 */
  14.106  
  14.107 -    memset( &launch_op, 0, sizeof(launch_op) );
  14.108 +    memset( &launch_domctl, 0, sizeof(launch_domctl) );
  14.109  
  14.110 -    launch_op.u.setvcpucontext.domain = (domid_t)domid;
  14.111 -    launch_op.u.setvcpucontext.vcpu   = 0;
  14.112 -    set_xen_guest_handle(launch_op.u.setvcpucontext.ctxt, ctxt);
  14.113 +    launch_domctl.domain = (domid_t)domid;
  14.114 +    launch_domctl.u.vcpucontext.vcpu   = 0;
  14.115 +    set_xen_guest_handle(launch_domctl.u.vcpucontext.ctxt, ctxt);
  14.116  
  14.117 -    launch_op.cmd = DOM0_SETVCPUCONTEXT;
  14.118 -    rc = xc_dom0_op(xc_handle, &launch_op);
  14.119 +    launch_domctl.cmd = XEN_DOMCTL_setvcpucontext;
  14.120 +    rc = xc_domctl(xc_handle, &launch_domctl);
  14.121  
  14.122      return rc;
  14.123  
    15.1 --- a/tools/libxc/xc_linux_restore.c	Fri Aug 25 10:39:24 2006 +0100
    15.2 +++ b/tools/libxc/xc_linux_restore.c	Fri Aug 25 18:39:10 2006 +0100
    15.3 @@ -107,7 +107,7 @@ int xc_linux_restore(int xc_handle, int 
    15.4                       unsigned int store_evtchn, unsigned long *store_mfn,
    15.5                       unsigned int console_evtchn, unsigned long *console_mfn)
    15.6  {
    15.7 -    DECLARE_DOM0_OP;
    15.8 +    DECLARE_DOMCTL;
    15.9      int rc = 1, i, n, pae_extended_cr3 = 0;
   15.10      unsigned long mfn, pfn;
   15.11      unsigned int prev_pc, this_pc;
   15.12 @@ -163,7 +163,7 @@ int xc_linux_restore(int xc_handle, int 
   15.13      }
   15.14  
   15.15      if (mlock(&ctxt, sizeof(ctxt))) {
   15.16 -        /* needed for build dom0 op, but might as well do early */
   15.17 +        /* needed for build domctl, but might as well do early */
   15.18          ERR("Unable to mlock ctxt");
   15.19          return 1;
   15.20      }
   15.21 @@ -257,13 +257,13 @@ int xc_linux_restore(int xc_handle, int 
   15.22      }
   15.23  
   15.24      /* Get the domain's shared-info frame. */
   15.25 -    op.cmd = DOM0_GETDOMAININFO;
   15.26 -    op.u.getdomaininfo.domain = (domid_t)dom;
   15.27 -    if (xc_dom0_op(xc_handle, &op) < 0) {
   15.28 +    domctl.cmd = XEN_DOMCTL_getdomaininfo;
   15.29 +    domctl.domain = (domid_t)dom;
   15.30 +    if (xc_domctl(xc_handle, &domctl) < 0) {
   15.31          ERR("Could not get information on new domain");
   15.32          goto out;
   15.33      }
   15.34 -    shared_info_frame = op.u.getdomaininfo.shared_info_frame;
   15.35 +    shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;
   15.36  
   15.37      if(xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(max_pfn)) != 0) {
   15.38          errno = ENOMEM;
   15.39 @@ -337,17 +337,22 @@ int xc_linux_restore(int xc_handle, int 
   15.40              goto out;
   15.41          }
   15.42  
   15.43 -        for (i = 0; i < j; i++) {
   15.44 +        for ( i = 0; i < j; i++ )
   15.45 +        {
   15.46 +            unsigned long pfn, pagetype;
   15.47 +            pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
   15.48 +            pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
   15.49  
   15.50 -            if ((region_pfn_type[i] & LTAB_MASK) == XTAB)
   15.51 +            if ( pagetype == XEN_DOMCTL_PFINFO_XTAB)
   15.52                  region_mfn[i] = 0; /* we know map will fail, but don't care */
   15.53              else
   15.54 -                region_mfn[i] = p2m[region_pfn_type[i] & ~LTAB_MASK];
   15.55 -
   15.56 +                region_mfn[i] = p2m[pfn];
   15.57          }
   15.58  
   15.59 -        if (!(region_base = xc_map_foreign_batch(
   15.60 -                  xc_handle, dom, PROT_WRITE, region_mfn, j))) {
   15.61 +        region_base = xc_map_foreign_batch(
   15.62 +            xc_handle, dom, PROT_WRITE, region_mfn, j);
   15.63 +        if ( region_base == NULL )
   15.64 +        {
   15.65              ERR("map batch failed");
   15.66              goto out;
   15.67          }
   15.68 @@ -357,14 +362,15 @@ int xc_linux_restore(int xc_handle, int 
   15.69              void *page;
   15.70              unsigned long pagetype;
   15.71  
   15.72 -            pfn      = region_pfn_type[i] & ~LTAB_MASK;
   15.73 -            pagetype = region_pfn_type[i] & LTAB_MASK;
   15.74 +            pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
   15.75 +            pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
   15.76  
   15.77 -            if (pagetype == XTAB)
   15.78 +            if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
   15.79                  /* a bogus/unmapped page: skip it */
   15.80                  continue;
   15.81  
   15.82 -            if (pfn > max_pfn) {
   15.83 +            if ( pfn > max_pfn )
   15.84 +            {
   15.85                  ERR("pfn out of range");
   15.86                  goto out;
   15.87              }
   15.88 @@ -381,10 +387,11 @@ int xc_linux_restore(int xc_handle, int 
   15.89                  goto out;
   15.90              }
   15.91  
   15.92 -            pagetype &= LTABTYPE_MASK;
   15.93 +            pagetype &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
   15.94  
   15.95 -            if(pagetype >= L1TAB && pagetype <= L4TAB) {
   15.96 -
   15.97 +            if ( (pagetype >= XEN_DOMCTL_PFINFO_L1TAB) && 
   15.98 +                 (pagetype <= XEN_DOMCTL_PFINFO_L4TAB) )
   15.99 +            {
  15.100                  /*
  15.101                  ** A page table page - need to 'uncanonicalize' it, i.e.
  15.102                  ** replace all the references to pfns with the corresponding
  15.103 @@ -396,7 +403,7 @@ int xc_linux_restore(int xc_handle, int 
  15.104                  */
  15.105                  if ((pt_levels != 3) ||
  15.106                      pae_extended_cr3 ||
  15.107 -                    (pagetype != L1TAB)) {
  15.108 +                    (pagetype != XEN_DOMCTL_PFINFO_L1TAB)) {
  15.109  
  15.110                      if (!uncanonicalize_pagetable(pagetype, page)) {
  15.111                          /*
  15.112 @@ -412,8 +419,9 @@ int xc_linux_restore(int xc_handle, int 
  15.113  
  15.114                  }
  15.115  
  15.116 -            } else if(pagetype != NOTAB) {
  15.117 -
  15.118 +            }
  15.119 +            else if ( pagetype != XEN_DOMCTL_PFINFO_NOTAB )
  15.120 +            {
  15.121                  ERR("Bogus page type %lx page table is out of range: "
  15.122                      "i=%d max_pfn=%lu", pagetype, i, max_pfn);
  15.123                  goto out;
  15.124 @@ -484,10 +492,12 @@ int xc_linux_restore(int xc_handle, int 
  15.125          int j, k;
  15.126  
  15.127          /* First pass: find all L3TABs current in > 4G mfns and get new mfns */
  15.128 -        for (i = 0; i < max_pfn; i++) {
  15.129 -
  15.130 -            if (((pfn_type[i] & LTABTYPE_MASK)==L3TAB) && (p2m[i]>0xfffffUL)) {
  15.131 -
  15.132 +        for ( i = 0; i < max_pfn; i++ )
  15.133 +        {
  15.134 +            if ( ((pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) ==
  15.135 +                  XEN_DOMCTL_PFINFO_L3TAB) &&
  15.136 +                 (p2m[i] > 0xfffffUL) )
  15.137 +            {
  15.138                  unsigned long new_mfn;
  15.139                  uint64_t l3ptes[4];
  15.140                  uint64_t *l3tab;
  15.141 @@ -530,9 +540,11 @@ int xc_linux_restore(int xc_handle, int 
  15.142          /* Second pass: find all L1TABs and uncanonicalize them */
  15.143          j = 0;
  15.144  
  15.145 -        for(i = 0; i < max_pfn; i++) {
  15.146 -
  15.147 -            if (((pfn_type[i] & LTABTYPE_MASK)==L1TAB)) {
  15.148 +        for ( i = 0; i < max_pfn; i++ )
  15.149 +        {
  15.150 +            if ( ((pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) ==
  15.151 +                  XEN_DOMCTL_PFINFO_L1TAB) )
  15.152 +            {
  15.153                  region_mfn[j] = p2m[i];
  15.154                  j++;
  15.155              }
  15.156 @@ -547,7 +559,7 @@ int xc_linux_restore(int xc_handle, int 
  15.157                  }
  15.158  
  15.159                  for(k = 0; k < j; k++) {
  15.160 -                    if(!uncanonicalize_pagetable(L1TAB,
  15.161 +                    if(!uncanonicalize_pagetable(XEN_DOMCTL_PFINFO_L1TAB,
  15.162                                                   region_base + k*PAGE_SIZE)) {
  15.163                          ERR("failed uncanonicalize pt!");
  15.164                          goto out;
  15.165 @@ -570,26 +582,26 @@ int xc_linux_restore(int xc_handle, int 
  15.166       * will barf when doing the type-checking.
  15.167       */
  15.168      nr_pins = 0;
  15.169 -    for (i = 0; i < max_pfn; i++) {
  15.170 -
  15.171 -        if ( (pfn_type[i] & LPINTAB) == 0 )
  15.172 +    for ( i = 0; i < max_pfn; i++ )
  15.173 +    {
  15.174 +        if ( (pfn_type[i] & XEN_DOMCTL_PFINFO_LPINTAB) == 0 )
  15.175              continue;
  15.176  
  15.177 -        switch (pfn_type[i]) {
  15.178 -
  15.179 -        case (L1TAB|LPINTAB):
  15.180 +        switch ( pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK )
  15.181 +        {
  15.182 +        case XEN_DOMCTL_PFINFO_L1TAB:
  15.183              pin[nr_pins].cmd = MMUEXT_PIN_L1_TABLE;
  15.184              break;
  15.185  
  15.186 -        case (L2TAB|LPINTAB):
  15.187 +        case XEN_DOMCTL_PFINFO_L2TAB:
  15.188              pin[nr_pins].cmd = MMUEXT_PIN_L2_TABLE;
  15.189              break;
  15.190  
  15.191 -        case (L3TAB|LPINTAB):
  15.192 +        case XEN_DOMCTL_PFINFO_L3TAB:
  15.193              pin[nr_pins].cmd = MMUEXT_PIN_L3_TABLE;
  15.194              break;
  15.195  
  15.196 -        case (L4TAB|LPINTAB):
  15.197 +        case XEN_DOMCTL_PFINFO_L4TAB:
  15.198              pin[nr_pins].cmd = MMUEXT_PIN_L4_TABLE;
  15.199              break;
  15.200  
  15.201 @@ -678,7 +690,7 @@ int xc_linux_restore(int xc_handle, int 
  15.202  
  15.203      /* Uncanonicalise the suspend-record frame number and poke resume rec. */
  15.204      pfn = ctxt.user_regs.edx;
  15.205 -    if ((pfn >= max_pfn) || (pfn_type[pfn] != NOTAB)) {
  15.206 +    if ((pfn >= max_pfn) || (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB)) {
  15.207          ERR("Suspend record frame number is bad");
  15.208          goto out;
  15.209      }
  15.210 @@ -703,7 +715,7 @@ int xc_linux_restore(int xc_handle, int 
  15.211  
  15.212      for (i = 0; i < ctxt.gdt_ents; i += 512) {
  15.213          pfn = ctxt.gdt_frames[i];
  15.214 -        if ((pfn >= max_pfn) || (pfn_type[pfn] != NOTAB)) {
  15.215 +        if ((pfn >= max_pfn) || (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB)) {
  15.216              ERR("GDT frame number is bad");
  15.217              goto out;
  15.218          }
  15.219 @@ -719,11 +731,11 @@ int xc_linux_restore(int xc_handle, int 
  15.220          goto out;
  15.221      }
  15.222  
  15.223 -    if ( (pfn_type[pfn] & LTABTYPE_MASK) !=
  15.224 -         ((unsigned long)pt_levels<<LTAB_SHIFT) ) {
  15.225 +    if ( (pfn_type[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
  15.226 +         ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) ) {
  15.227          ERR("PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
  15.228              pfn, max_pfn, pfn_type[pfn],
  15.229 -            (unsigned long)pt_levels<<LTAB_SHIFT);
  15.230 +            (unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
  15.231          goto out;
  15.232      }
  15.233  
  15.234 @@ -744,7 +756,7 @@ int xc_linux_restore(int xc_handle, int 
  15.235      /* Uncanonicalise the pfn-to-mfn table frame-number list. */
  15.236      for (i = 0; i < P2M_FL_ENTRIES; i++) {
  15.237          pfn = p2m_frame_list[i];
  15.238 -        if ((pfn >= max_pfn) || (pfn_type[pfn] != NOTAB)) {
  15.239 +        if ((pfn >= max_pfn) || (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB)) {
  15.240              ERR("PFN-to-MFN frame number is bad");
  15.241              goto out;
  15.242          }
  15.243 @@ -797,11 +809,11 @@ int xc_linux_restore(int xc_handle, int 
  15.244  
  15.245      DPRINTF("Domain ready to be built.\n");
  15.246  
  15.247 -    op.cmd = DOM0_SETVCPUCONTEXT;
  15.248 -    op.u.setvcpucontext.domain = (domid_t)dom;
  15.249 -    op.u.setvcpucontext.vcpu   = 0;
  15.250 -    set_xen_guest_handle(op.u.setvcpucontext.ctxt, &ctxt);
  15.251 -    rc = xc_dom0_op(xc_handle, &op);
  15.252 +    domctl.cmd = XEN_DOMCTL_setvcpucontext;
  15.253 +    domctl.domain = (domid_t)dom;
  15.254 +    domctl.u.vcpucontext.vcpu   = 0;
  15.255 +    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt);
  15.256 +    rc = xc_domctl(xc_handle, &domctl);
  15.257  
  15.258      if (rc != 0) {
  15.259          ERR("Couldn't build the domain");
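
The restore path now decodes each page-frame record with the XEN_DOMCTL_PFINFO_* masks instead of the old LTAB_*/XTAB/NOTAB names. A small helper sketch showing the decomposition the loops above perform on every entry (assuming xenctrl.h exposes the public domctl definitions, as callers of these masks require):

    #include <xenctrl.h>

    /* Sketch: split a pfn_type entry into frame number and page type,
     * mirroring the tests used in xc_linux_restore() above. */
    static const char *classify_pfn_entry(unsigned long entry,
                                          unsigned long *pfn)
    {
        unsigned long pagetype = entry & XEN_DOMCTL_PFINFO_LTABTYPE_MASK;

        *pfn = entry & ~XEN_DOMCTL_PFINFO_LTAB_MASK;

        if ( (entry & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XTAB )
            return "bogus/unmapped page (skipped)";
        if ( (pagetype >= XEN_DOMCTL_PFINFO_L1TAB) &&
             (pagetype <= XEN_DOMCTL_PFINFO_L4TAB) )
            return (entry & XEN_DOMCTL_PFINFO_LPINTAB) ?
                "pinned page-table page" : "page-table page";
        return "normal page (XEN_DOMCTL_PFINFO_NOTAB)";
    }
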
    16.1 --- a/tools/libxc/xc_linux_save.c	Fri Aug 25 10:39:24 2006 +0100
    16.2 +++ b/tools/libxc/xc_linux_save.c	Fri Aug 25 18:39:10 2006 +0100
    16.3 @@ -271,7 +271,7 @@ static inline ssize_t write_exact(int fd
    16.4  
    16.5  
    16.6  static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
    16.7 -                       xc_shadow_control_stats_t *stats, int print)
    16.8 +                       xc_shadow_op_stats_t *stats, int print)
    16.9  {
   16.10      static struct timeval wall_last;
   16.11      static long long      d0_cpu_last;
   16.12 @@ -329,7 +329,7 @@ static int analysis_phase(int xc_handle,
   16.13                            unsigned long *arr, int runs)
   16.14  {
   16.15      long long start, now;
   16.16 -    xc_shadow_control_stats_t stats;
   16.17 +    xc_shadow_op_stats_t stats;
   16.18      int j;
   16.19  
   16.20      start = llgettimeofday();
   16.21 @@ -337,13 +337,13 @@ static int analysis_phase(int xc_handle,
   16.22      for (j = 0; j < runs; j++) {
   16.23          int i;
   16.24  
   16.25 -        xc_shadow_control(xc_handle, domid, DOM0_SHADOW_CONTROL_OP_CLEAN,
   16.26 +        xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
   16.27                            arr, max_pfn, NULL, 0, NULL);
   16.28          DPRINTF("#Flush\n");
   16.29          for ( i = 0; i < 40; i++ ) {
   16.30              usleep(50000);
   16.31              now = llgettimeofday();
   16.32 -            xc_shadow_control(xc_handle, domid, DOM0_SHADOW_CONTROL_OP_PEEK,
   16.33 +            xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_PEEK,
   16.34                                NULL, 0, NULL, 0, &stats);
   16.35  
   16.36              DPRINTF("now= %lld faults= %"PRId32" dirty= %"PRId32"\n",
   16.37 @@ -427,10 +427,10 @@ int canonicalize_pagetable(unsigned long
   16.38      */
   16.39      xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
   16.40  
   16.41 -    if (pt_levels == 2 && type == L2TAB)
   16.42 +    if (pt_levels == 2 && type == XEN_DOMCTL_PFINFO_L2TAB)
   16.43          xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT);
   16.44  
   16.45 -    if (pt_levels == 3 && type == L3TAB)
   16.46 +    if (pt_levels == 3 && type == XEN_DOMCTL_PFINFO_L3TAB)
   16.47          xen_start = L3_PAGETABLE_ENTRIES_PAE;
   16.48  
   16.49      /*
   16.50 @@ -439,7 +439,7 @@ int canonicalize_pagetable(unsigned long
   16.51      ** Xen always ensures is present in that L2. Guests must ensure
   16.52      ** that this check will fail for other L2s.
   16.53      */
   16.54 -    if (pt_levels == 3 && type == L2TAB) {
   16.55 +    if (pt_levels == 3 && type == XEN_DOMCTL_PFINFO_L2TAB) {
   16.56  
   16.57  /* XXX index of the L2 entry in PAE mode which holds the guest LPT */
   16.58  #define PAE_GLPT_L2ENTRY (495)
   16.59 @@ -449,7 +449,7 @@ int canonicalize_pagetable(unsigned long
   16.60              xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
   16.61      }
   16.62  
   16.63 -    if (pt_levels == 4 && type == L4TAB) {
   16.64 +    if (pt_levels == 4 && type == XEN_DOMCTL_PFINFO_L4TAB) {
   16.65          /*
   16.66          ** XXX SMH: should compute these from hvirt_start (which we have)
   16.67          ** and hvirt_end (which we don't)
   16.68 @@ -603,7 +603,7 @@ int xc_linux_save(int xc_handle, int io_
   16.69         - to fixup by sending at the end if not already resent; */
   16.70      unsigned long *to_send = NULL, *to_skip = NULL, *to_fix = NULL;
   16.71  
   16.72 -    xc_shadow_control_stats_t stats;
   16.73 +    xc_shadow_op_stats_t stats;
   16.74  
   16.75      unsigned long needed_to_fix = 0;
   16.76      unsigned long total_sent    = 0;
   16.77 @@ -724,7 +724,7 @@ int xc_linux_save(int xc_handle, int io_
   16.78      if (live) {
   16.79  
   16.80          if (xc_shadow_control(xc_handle, dom,
   16.81 -                              DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY,
   16.82 +                              XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
   16.83                                NULL, 0, NULL, 0, NULL) < 0) {
   16.84              ERR("Couldn't enable shadow mode");
   16.85              goto out;
   16.86 @@ -781,8 +781,8 @@ int xc_linux_save(int xc_handle, int io_
   16.87      analysis_phase(xc_handle, dom, max_pfn, to_skip, 0);
   16.88  
   16.89      /* We want zeroed memory so use calloc rather than malloc. */
   16.90 -    pfn_type  = calloc(MAX_BATCH_SIZE, sizeof(unsigned long));
   16.91 -    pfn_batch = calloc(MAX_BATCH_SIZE, sizeof(unsigned long));
   16.92 +    pfn_type  = calloc(MAX_BATCH_SIZE, sizeof(*pfn_type));
   16.93 +    pfn_batch = calloc(MAX_BATCH_SIZE, sizeof(*pfn_batch));
   16.94  
   16.95      if ((pfn_type == NULL) || (pfn_batch == NULL)) {
   16.96          ERR("failed to alloc memory for pfn_type and/or pfn_batch arrays");
   16.97 @@ -790,12 +790,11 @@ int xc_linux_save(int xc_handle, int io_
   16.98          goto out;
   16.99      }
  16.100  
  16.101 -    if (mlock(pfn_type, MAX_BATCH_SIZE * sizeof(unsigned long))) {
  16.102 +    if (mlock(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) {
  16.103          ERR("Unable to mlock");
  16.104          goto out;
  16.105      }
  16.106  
  16.107 -
  16.108      /*
  16.109       * Quick belt and braces sanity check.
  16.110       */
  16.111 @@ -876,7 +875,7 @@ int xc_linux_save(int xc_handle, int io_
  16.112              /* slightly wasteful to peek the whole array evey time,
  16.113                 but this is fast enough for the moment. */
  16.114              if (!last_iter && xc_shadow_control(
  16.115 -                    xc_handle, dom, DOM0_SHADOW_CONTROL_OP_PEEK,
  16.116 +                    xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK,
  16.117                      to_skip, max_pfn, NULL, 0, NULL) != max_pfn) {
  16.118                  ERR("Error peeking shadow bitmap");
  16.119                  goto out;
  16.120 @@ -930,7 +929,7 @@ int xc_linux_save(int xc_handle, int io_
  16.121                  if(last_iter && test_bit(n, to_fix) && !test_bit(n, to_send)) {
  16.122                      needed_to_fix++;
  16.123                      DPRINTF("Fix! iter %d, pfn %x. mfn %lx\n",
  16.124 -                            iter,n,pfn_type[batch]);
  16.125 +                            iter, n, pfn_type[batch]);
  16.126                  }
  16.127  
  16.128                  clear_bit(n, to_fix);
  16.129 @@ -952,9 +951,12 @@ int xc_linux_save(int xc_handle, int io_
  16.130                  goto out;
  16.131              }
  16.132  
  16.133 -            for (j = 0; j < batch; j++) {
  16.134 +            for ( j = 0; j < batch; j++ )
  16.135 +            {
  16.136  
  16.137 -                if ((pfn_type[j] & LTAB_MASK) == XTAB) {
  16.138 +                if ( (pfn_type[j] & XEN_DOMCTL_PFINFO_LTAB_MASK) ==
  16.139 +                     XEN_DOMCTL_PFINFO_XTAB )
  16.140 +                {
  16.141                      DPRINTF("type fail: page %i mfn %08lx\n", j, pfn_type[j]);
  16.142                      continue;
  16.143                  }
  16.144 @@ -963,13 +965,16 @@ int xc_linux_save(int xc_handle, int io_
  16.145                      DPRINTF("%d pfn= %08lx mfn= %08lx [mfn]= %08lx"
  16.146                              " sum= %08lx\n",
  16.147                              iter,
  16.148 -                            (pfn_type[j] & LTAB_MASK) | pfn_batch[j],
  16.149 +                            (pfn_type[j] & XEN_DOMCTL_PFINFO_LTAB_MASK) |
  16.150 +                            pfn_batch[j],
  16.151                              pfn_type[j],
  16.152 -                            mfn_to_pfn(pfn_type[j]&(~LTAB_MASK)),
  16.153 +                            mfn_to_pfn(pfn_type[j] &
  16.154 +                                       ~XEN_DOMCTL_PFINFO_LTAB_MASK),
  16.155                              csum_page(region_base + (PAGE_SIZE*j)));
  16.156  
  16.157                  /* canonicalise mfn->pfn */
  16.158 -                pfn_type[j] = (pfn_type[j] & LTAB_MASK) | pfn_batch[j];
  16.159 +                pfn_type[j] = (pfn_type[j] & XEN_DOMCTL_PFINFO_LTAB_MASK) |
  16.160 +                    pfn_batch[j];
  16.161              }
  16.162  
  16.163              if(!write_exact(io_fd, &batch, sizeof(unsigned int))) {
  16.164 @@ -983,21 +988,23 @@ int xc_linux_save(int xc_handle, int io_
  16.165              }
  16.166  
  16.167              /* entering this loop, pfn_type is now in pfns (Not mfns) */
  16.168 -            for (j = 0; j < batch; j++) {
  16.169 +            for ( j = 0; j < batch; j++ )
  16.170 +            {
  16.171 +                unsigned long pfn, pagetype;
  16.172 +                void *spage = (char *)region_base + (PAGE_SIZE*j);
  16.173  
  16.174 -                unsigned long pfn      = pfn_type[j] & ~LTAB_MASK;
  16.175 -                unsigned long pagetype = pfn_type[j] & LTAB_MASK;
  16.176 -                void *spage            = (void *) region_base + (PAGE_SIZE*j);
  16.177 -
  16.178 +                pfn      = pfn_type[j] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
  16.179 +                pagetype = pfn_type[j] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
  16.180  
  16.181                  /* write out pages in batch */
  16.182 -                if (pagetype == XTAB)
  16.183 +                if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
  16.184                      continue;
  16.185  
  16.186 -                pagetype &= LTABTYPE_MASK;
  16.187 +                pagetype &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
  16.188  
  16.189 -                if (pagetype >= L1TAB && pagetype <= L4TAB) {
  16.190 -
  16.191 +                if ( (pagetype >= XEN_DOMCTL_PFINFO_L1TAB) &&
  16.192 +                     (pagetype <= XEN_DOMCTL_PFINFO_L4TAB) )
  16.193 +                {
  16.194                      /* We have a pagetable page: need to rewrite it. */
  16.195                      race = 
  16.196                          canonicalize_pagetable(pagetype, pfn, spage, page); 
  16.197 @@ -1083,7 +1090,7 @@ int xc_linux_save(int xc_handle, int io_
  16.198              }
  16.199  
  16.200              if (xc_shadow_control(xc_handle, dom, 
  16.201 -                                  DOM0_SHADOW_CONTROL_OP_CLEAN, to_send, 
  16.202 +                                  XEN_DOMCTL_SHADOW_OP_CLEAN, to_send, 
  16.203                                    max_pfn, NULL, 0, &stats) != max_pfn) {
  16.204                  ERR("Error flushing shadow PT");
  16.205                  goto out;
  16.206 @@ -1174,7 +1181,7 @@ int xc_linux_save(int xc_handle, int io_
  16.207  
  16.208      if (live) {
  16.209          if(xc_shadow_control(xc_handle, dom, 
  16.210 -                             DOM0_SHADOW_CONTROL_OP_OFF,
  16.211 +                             XEN_DOMCTL_SHADOW_OP_OFF,
  16.212                               NULL, 0, NULL, 0, NULL) < 0) {
  16.213              DPRINTF("Warning - couldn't disable shadow mode");
  16.214          }
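
The live-save loop drives dirty-page tracking through the renamed shadow operations. A condensed sketch of that control flow, using only the xc_shadow_control() calls and argument patterns visible above; to_send and max_pfn are assumed to be the caller's dirty bitmap and domain size in pages:

    #include <xenctrl.h>

    /* Sketch of the log-dirty sequence used by xc_linux_save() above. */
    static int logdirty_sequence(int xc_handle, uint32_t dom,
                                 unsigned long *to_send,
                                 unsigned long max_pfn)
    {
        xc_shadow_op_stats_t stats;

        /* Start of a live save: enable dirty-page logging. */
        if ( xc_shadow_control(xc_handle, dom,
                               XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                               NULL, 0, NULL, 0, NULL) < 0 )
            return -1;

        /* After copying a batch: read-and-clear the dirty bitmap to see
         * what the guest touched while we were sending. */
        if ( xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_CLEAN,
                               to_send, max_pfn, NULL, 0, &stats) != max_pfn )
            return -1;

        /* Final iteration done: turn shadow mode back off. */
        return xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_OFF,
                                 NULL, 0, NULL, 0, NULL);
    }
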
    17.1 --- a/tools/libxc/xc_misc.c	Fri Aug 25 10:39:24 2006 +0100
    17.2 +++ b/tools/libxc/xc_misc.c	Fri Aug 25 18:39:10 2006 +0100
    17.3 @@ -12,20 +12,20 @@ int xc_readconsolering(int xc_handle,
    17.4                         int clear)
    17.5  {
    17.6      int ret;
    17.7 -    DECLARE_DOM0_OP;
    17.8 +    DECLARE_SYSCTL;
    17.9      char *buffer = *pbuffer;
   17.10      unsigned int nr_chars = *pnr_chars;
   17.11  
   17.12 -    op.cmd = DOM0_READCONSOLE;
   17.13 -    set_xen_guest_handle(op.u.readconsole.buffer, buffer);
   17.14 -    op.u.readconsole.count  = nr_chars;
   17.15 -    op.u.readconsole.clear  = clear;
   17.16 +    sysctl.cmd = XEN_SYSCTL_readconsole;
   17.17 +    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
   17.18 +    sysctl.u.readconsole.count  = nr_chars;
   17.19 +    sysctl.u.readconsole.clear  = clear;
   17.20  
   17.21      if ( (ret = mlock(buffer, nr_chars)) != 0 )
   17.22          return ret;
   17.23  
   17.24 -    if ( (ret = do_dom0_op(xc_handle, &op)) == 0 )
   17.25 -        *pnr_chars = op.u.readconsole.count;
   17.26 +    if ( (ret = do_sysctl(xc_handle, &sysctl)) == 0 )
   17.27 +        *pnr_chars = sysctl.u.readconsole.count;
   17.28  
   17.29      safe_munlock(buffer, nr_chars);
   17.30  
   17.31 @@ -36,15 +36,14 @@ int xc_physinfo(int xc_handle,
   17.32                  xc_physinfo_t *put_info)
   17.33  {
   17.34      int ret;
   17.35 -    DECLARE_DOM0_OP;
   17.36 +    DECLARE_SYSCTL;
   17.37  
   17.38 -    op.cmd = DOM0_PHYSINFO;
   17.39 -    op.interface_version = DOM0_INTERFACE_VERSION;
   17.40 +    sysctl.cmd = XEN_SYSCTL_physinfo;
   17.41  
   17.42 -    if ( (ret = do_dom0_op(xc_handle, &op)) != 0 )
   17.43 +    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
   17.44          return ret;
   17.45  
   17.46 -    memcpy(put_info, &op.u.physinfo, sizeof(*put_info));
   17.47 +    memcpy(put_info, &sysctl.u.physinfo, sizeof(*put_info));
   17.48  
   17.49      return 0;
   17.50  }
   17.51 @@ -53,15 +52,14 @@ int xc_sched_id(int xc_handle,
   17.52                  int *sched_id)
   17.53  {
   17.54      int ret;
   17.55 -    DECLARE_DOM0_OP;
   17.56 +    DECLARE_SYSCTL;
   17.57  
   17.58 -    op.cmd = DOM0_SCHED_ID;
   17.59 -    op.interface_version = DOM0_INTERFACE_VERSION;
   17.60 +    sysctl.cmd = XEN_SYSCTL_sched_id;
   17.61  
   17.62 -    if ( (ret = do_dom0_op(xc_handle, &op)) != 0 )
   17.63 +    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
   17.64          return ret;
   17.65  
   17.66 -    *sched_id = op.u.sched_id.sched_id;
   17.67 +    *sched_id = sysctl.u.sched_id.sched_id;
   17.68  
   17.69      return 0;
   17.70  }
   17.71 @@ -74,19 +72,19 @@ int xc_perfc_control(int xc_handle,
   17.72                       int *nbr_val)
   17.73  {
   17.74      int rc;
   17.75 -    DECLARE_DOM0_OP;
   17.76 +    DECLARE_SYSCTL;
   17.77  
   17.78 -    op.cmd = DOM0_PERFCCONTROL;
   17.79 -    op.u.perfccontrol.op   = opcode;
   17.80 -    set_xen_guest_handle(op.u.perfccontrol.desc, desc);
   17.81 -    set_xen_guest_handle(op.u.perfccontrol.val, val);
   17.82 +    sysctl.cmd = XEN_SYSCTL_perfc_op;
   17.83 +    sysctl.u.perfc_op.cmd = opcode;
   17.84 +    set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
   17.85 +    set_xen_guest_handle(sysctl.u.perfc_op.val, val);
   17.86  
   17.87 -    rc = do_dom0_op(xc_handle, &op);
   17.88 +    rc = do_sysctl(xc_handle, &sysctl);
   17.89  
   17.90      if (nbr_desc)
   17.91 -        *nbr_desc = op.u.perfccontrol.nr_counters;
   17.92 +        *nbr_desc = sysctl.u.perfc_op.nr_counters;
   17.93      if (nbr_val)
   17.94 -        *nbr_val = op.u.perfccontrol.nr_vals;
   17.95 +        *nbr_val = sysctl.u.perfc_op.nr_vals;
   17.96  
   17.97      return rc;
   17.98  }
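
System-wide queries such as the console ring, physinfo and the scheduler id now travel over the new sysctl hypercall rather than dom0_op. A trivial usage sketch combining xc_sched_id() above with the XEN_SCHEDULER_CREDIT constant used in xc_csched.c:

    #include <xenctrl.h>

    /* Sketch: report whether the hypervisor is running the credit
     * scheduler.  Returns 1 for yes, 0 for no, -1 on error. */
    int running_credit_scheduler(int xc_handle)
    {
        int sched_id;

        if ( xc_sched_id(xc_handle, &sched_id) != 0 )
            return -1;

        return (sched_id == XEN_SCHEDULER_CREDIT);
    }
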
    18.1 --- a/tools/libxc/xc_private.c	Fri Aug 25 10:39:24 2006 +0100
    18.2 +++ b/tools/libxc/xc_private.c	Fri Aug 25 18:39:10 2006 +0100
    18.3 @@ -11,12 +11,12 @@
    18.4  int xc_get_pfn_type_batch(int xc_handle,
    18.5                            uint32_t dom, int num, unsigned long *arr)
    18.6  {
    18.7 -    DECLARE_DOM0_OP;
    18.8 -    op.cmd = DOM0_GETPAGEFRAMEINFO2;
    18.9 -    op.u.getpageframeinfo2.domain = (domid_t)dom;
   18.10 -    op.u.getpageframeinfo2.num    = num;
   18.11 -    set_xen_guest_handle(op.u.getpageframeinfo2.array, arr);
   18.12 -    return do_dom0_op(xc_handle, &op);
   18.13 +    DECLARE_DOMCTL;
   18.14 +    domctl.cmd = XEN_DOMCTL_getpageframeinfo2;
   18.15 +    domctl.domain = (domid_t)dom;
   18.16 +    domctl.u.getpageframeinfo2.num    = num;
   18.17 +    set_xen_guest_handle(domctl.u.getpageframeinfo2.array, arr);
   18.18 +    return do_domctl(xc_handle, &domctl);
   18.19  }
   18.20  
   18.21  #define GETPFN_ERR (~0U)
   18.22 @@ -24,16 +24,16 @@ unsigned int get_pfn_type(int xc_handle,
   18.23                            unsigned long mfn,
   18.24                            uint32_t dom)
   18.25  {
   18.26 -    DECLARE_DOM0_OP;
   18.27 -    op.cmd = DOM0_GETPAGEFRAMEINFO;
   18.28 -    op.u.getpageframeinfo.gmfn   = mfn;
   18.29 -    op.u.getpageframeinfo.domain = (domid_t)dom;
   18.30 -    if ( do_dom0_op(xc_handle, &op) < 0 )
   18.31 +    DECLARE_DOMCTL;
   18.32 +    domctl.cmd = XEN_DOMCTL_getpageframeinfo;
   18.33 +    domctl.u.getpageframeinfo.gmfn   = mfn;
   18.34 +    domctl.domain = (domid_t)dom;
   18.35 +    if ( do_domctl(xc_handle, &domctl) < 0 )
   18.36      {
   18.37          PERROR("Unexpected failure when getting page frame info!");
   18.38          return GETPFN_ERR;
   18.39      }
   18.40 -    return op.u.getpageframeinfo.type;
   18.41 +    return domctl.u.getpageframeinfo.type;
   18.42  }
   18.43  
   18.44  int xc_mmuext_op(
   18.45 @@ -248,17 +248,17 @@ int xc_memory_op(int xc_handle,
   18.46  
   18.47  long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid, int vcpu )
   18.48  {
   18.49 -    DECLARE_DOM0_OP;
   18.50 +    DECLARE_DOMCTL;
   18.51  
   18.52 -    op.cmd = DOM0_GETVCPUINFO;
   18.53 -    op.u.getvcpuinfo.domain = (domid_t)domid;
   18.54 -    op.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
   18.55 -    if ( (do_dom0_op(xc_handle, &op) < 0) )
   18.56 +    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
   18.57 +    domctl.domain = (domid_t)domid;
   18.58 +    domctl.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
   18.59 +    if ( (do_domctl(xc_handle, &domctl) < 0) )
   18.60      {
   18.61          PERROR("Could not get info on domain");
   18.62          return -1;
   18.63      }
   18.64 -    return op.u.getvcpuinfo.cpu_time;
   18.65 +    return domctl.u.getvcpuinfo.cpu_time;
   18.66  }
   18.67  
   18.68  
   18.69 @@ -268,12 +268,12 @@ int xc_get_pfn_list(int xc_handle,
   18.70                      xen_pfn_t *pfn_buf,
   18.71                      unsigned long max_pfns)
   18.72  {
   18.73 -    DECLARE_DOM0_OP;
   18.74 +    DECLARE_DOMCTL;
   18.75      int ret;
   18.76 -    op.cmd = DOM0_GETMEMLIST;
   18.77 -    op.u.getmemlist.domain   = (domid_t)domid;
   18.78 -    op.u.getmemlist.max_pfns = max_pfns;
   18.79 -    set_xen_guest_handle(op.u.getmemlist.buffer, pfn_buf);
   18.80 +    domctl.cmd = XEN_DOMCTL_getmemlist;
   18.81 +    domctl.domain   = (domid_t)domid;
   18.82 +    domctl.u.getmemlist.max_pfns = max_pfns;
   18.83 +    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);
   18.84  
   18.85  #ifdef VALGRIND
   18.86      memset(pfn_buf, 0, max_pfns * sizeof(xen_pfn_t));
   18.87 @@ -285,7 +285,7 @@ int xc_get_pfn_list(int xc_handle,
   18.88          return -1;
   18.89      }
   18.90  
   18.91 -    ret = do_dom0_op(xc_handle, &op);
   18.92 +    ret = do_domctl(xc_handle, &domctl);
   18.93  
   18.94      safe_munlock(pfn_buf, max_pfns * sizeof(xen_pfn_t));
   18.95  
   18.96 @@ -294,7 +294,7 @@ int xc_get_pfn_list(int xc_handle,
   18.97      DPRINTF(("Ret for xc_get_pfn_list is %d\n", ret));
   18.98      if (ret >= 0) {
   18.99          int i, j;
  18.100 -        for (i = 0; i < op.u.getmemlist.num_pfns; i += 16) {
  18.101 +        for (i = 0; i < domctl.u.getmemlist.num_pfns; i += 16) {
  18.102              DPRINTF("0x%x: ", i);
  18.103              for (j = 0; j < 16; j++)
  18.104                  DPRINTF("0x%lx ", pfn_buf[i + j]);
  18.105 @@ -304,17 +304,17 @@ int xc_get_pfn_list(int xc_handle,
  18.106  #endif
  18.107  #endif
  18.108  
  18.109 -    return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
  18.110 +    return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
  18.111  }
  18.112  #endif
  18.113  
  18.114  long xc_get_tot_pages(int xc_handle, uint32_t domid)
  18.115  {
  18.116 -    DECLARE_DOM0_OP;
  18.117 -    op.cmd = DOM0_GETDOMAININFO;
  18.118 -    op.u.getdomaininfo.domain = (domid_t)domid;
  18.119 -    return (do_dom0_op(xc_handle, &op) < 0) ?
  18.120 -        -1 : op.u.getdomaininfo.tot_pages;
  18.121 +    DECLARE_DOMCTL;
  18.122 +    domctl.cmd = XEN_DOMCTL_getdomaininfo;
  18.123 +    domctl.domain = (domid_t)domid;
  18.124 +    return (do_domctl(xc_handle, &domctl) < 0) ?
  18.125 +        -1 : domctl.u.getdomaininfo.tot_pages;
  18.126  }
  18.127  
  18.128  int xc_copy_to_domain_page(int xc_handle,
  18.129 @@ -386,9 +386,14 @@ void xc_map_memcpy(unsigned long dst, co
  18.130      }
  18.131  }
  18.132  
  18.133 -int xc_dom0_op(int xc_handle, dom0_op_t *op)
  18.134 +int xc_domctl(int xc_handle, struct xen_domctl *domctl)
  18.135  {
  18.136 -    return do_dom0_op(xc_handle, op);
  18.137 +    return do_domctl(xc_handle, domctl);
  18.138 +}
  18.139 +
  18.140 +int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl)
  18.141 +{
  18.142 +    return do_sysctl(xc_handle, sysctl);
  18.143  }
  18.144  
  18.145  int xc_version(int xc_handle, int cmd, void *arg)
    19.1 --- a/tools/libxc/xc_private.h	Fri Aug 25 10:39:24 2006 +0100
    19.2 +++ b/tools/libxc/xc_private.h	Fri Aug 25 18:39:10 2006 +0100
    19.3 @@ -18,14 +18,16 @@
    19.4  #include <xen/sys/privcmd.h>
    19.5  
    19.6  /* valgrind cannot see when a hypercall has filled in some values.  For this
    19.7 -   reason, we must zero the privcmd_hypercall_t or dom0_op_t instance before a
    19.8 -   call, if using valgrind.  */
    19.9 +   reason, we must zero the privcmd_hypercall_t or domctl/sysctl instance
   19.10 +   before a call, if using valgrind.  */
   19.11  #ifdef VALGRIND
   19.12  #define DECLARE_HYPERCALL privcmd_hypercall_t hypercall = { 0 }
   19.13 -#define DECLARE_DOM0_OP dom0_op_t op = { 0 }
   19.14 +#define DECLARE_DOMCTL struct xen_domctl domctl = { 0 }
   19.15 +#define DECLARE_SYSCTL struct xen_sysctl sysctl = { 0 }
   19.16  #else
   19.17  #define DECLARE_HYPERCALL privcmd_hypercall_t hypercall
   19.18 -#define DECLARE_DOM0_OP dom0_op_t op
   19.19 +#define DECLARE_DOMCTL struct xen_domctl domctl
   19.20 +#define DECLARE_SYSCTL struct xen_sysctl sysctl
   19.21  #endif
   19.22  
   19.23  #define PAGE_SHIFT              XC_PAGE_SHIFT
   19.24 @@ -94,17 +96,17 @@ static inline int do_xen_version(int xc_
   19.25      return do_xen_hypercall(xc_handle, &hypercall);
   19.26  }
   19.27  
   19.28 -static inline int do_dom0_op(int xc_handle, dom0_op_t *op)
   19.29 +static inline int do_domctl(int xc_handle, struct xen_domctl *domctl)
   19.30  {
   19.31      int ret = -1;
   19.32      DECLARE_HYPERCALL;
   19.33  
   19.34 -    op->interface_version = DOM0_INTERFACE_VERSION;
   19.35 +    domctl->interface_version = XEN_DOMCTL_INTERFACE_VERSION;
   19.36  
   19.37 -    hypercall.op     = __HYPERVISOR_dom0_op;
   19.38 -    hypercall.arg[0] = (unsigned long)op;
   19.39 +    hypercall.op     = __HYPERVISOR_domctl;
   19.40 +    hypercall.arg[0] = (unsigned long)domctl;
   19.41  
   19.42 -    if ( mlock(op, sizeof(*op)) != 0 )
   19.43 +    if ( mlock(domctl, sizeof(*domctl)) != 0 )
   19.44      {
   19.45          PERROR("Could not lock memory for Xen hypercall");
   19.46          goto out1;
   19.47 @@ -113,11 +115,40 @@ static inline int do_dom0_op(int xc_hand
   19.48      if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
   19.49      {
   19.50          if ( errno == EACCES )
   19.51 -            DPRINTF("Dom0 operation failed -- need to"
   19.52 +            DPRINTF("domctl operation failed -- need to"
   19.53                      " rebuild the user-space tool set?\n");
   19.54      }
   19.55  
   19.56 -    safe_munlock(op, sizeof(*op));
   19.57 +    safe_munlock(domctl, sizeof(*domctl));
   19.58 +
   19.59 + out1:
   19.60 +    return ret;
   19.61 +}
   19.62 +
   19.63 +static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
   19.64 +{
   19.65 +    int ret = -1;
   19.66 +    DECLARE_HYPERCALL;
   19.67 +
   19.68 +    sysctl->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
   19.69 +
   19.70 +    hypercall.op     = __HYPERVISOR_sysctl;
   19.71 +    hypercall.arg[0] = (unsigned long)sysctl;
   19.72 +
   19.73 +    if ( mlock(sysctl, sizeof(*sysctl)) != 0 )
   19.74 +    {
   19.75 +        PERROR("Could not lock memory for Xen hypercall");
   19.76 +        goto out1;
   19.77 +    }
   19.78 +
   19.79 +    if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
   19.80 +    {
   19.81 +        if ( errno == EACCES )
   19.82 +            DPRINTF("sysctl operation failed -- need to"
   19.83 +                    " rebuild the user-space tool set?\n");
   19.84 +    }
   19.85 +
   19.86 +    safe_munlock(sysctl, sizeof(*sysctl));
   19.87  
   19.88   out1:
   19.89      return ret;
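
[Editor's note, illustrative sketch] Internally, every libxc call now follows the same declare/fill/issue convention against one of these two inline helpers. A hedged sketch of that pattern, modelled on the getdomaininfo hunks elsewhere in this changeset and assumed to sit in a libxc source file that includes xc_private.h (the helper name get_domain_flags is illustrative):

    /* Illustrative helper: fetch a domain's flags via XEN_DOMCTL_getdomaininfo
     * using the declare/fill/call convention introduced above. */
    static int get_domain_flags(int xc_handle, uint32_t dom, uint32_t *flags)
    {
        DECLARE_DOMCTL;
        int ret;

        domctl.cmd = XEN_DOMCTL_getdomaininfo;
        domctl.domain = (domid_t)dom;

        if ( (ret = do_domctl(xc_handle, &domctl)) != 0 )
            return ret;

        *flags = domctl.u.getdomaininfo.flags;
        return 0;
    }
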
    20.1 --- a/tools/libxc/xc_ptrace.c	Fri Aug 25 10:39:24 2006 +0100
    20.2 +++ b/tools/libxc/xc_ptrace.c	Fri Aug 25 18:39:10 2006 +0100
    20.3 @@ -41,8 +41,8 @@ static char *ptrace_names[] = {
    20.4  static int                      current_domid = -1;
    20.5  static int                      current_isfile;
    20.6  
    20.7 -static cpumap_t                 online_cpumap;
    20.8 -static cpumap_t                 regs_valid;
    20.9 +static uint64_t                 online_cpumap;
   20.10 +static uint64_t                 regs_valid;
   20.11  static vcpu_guest_context_t     ctxt[MAX_VIRT_CPUS];
   20.12  
   20.13  extern int ffsll(long long int);
   20.14 @@ -111,7 +111,8 @@ paging_enabled(vcpu_guest_context_t *v)
   20.15   */
   20.16  
   20.17  static int
   20.18 -get_online_cpumap(int xc_handle, dom0_getdomaininfo_t *d, cpumap_t *cpumap)
   20.19 +get_online_cpumap(int xc_handle, struct xen_domctl_getdomaininfo *d,
   20.20 +                  uint64_t *cpumap)
   20.21  {
   20.22      int i, online, retval;
   20.23  
   20.24 @@ -133,9 +134,9 @@ get_online_cpumap(int xc_handle, dom0_ge
   20.25   */
   20.26  
   20.27  static void
   20.28 -online_vcpus_changed(cpumap_t cpumap)
   20.29 +online_vcpus_changed(uint64_t cpumap)
   20.30  {
   20.31 -    cpumap_t changed_cpumap = cpumap ^ online_cpumap;
   20.32 +    uint64_t changed_cpumap = cpumap ^ online_cpumap;
   20.33      int index;
   20.34  
   20.35      while ( (index = ffsll(changed_cpumap)) ) {
   20.36 @@ -418,25 +419,25 @@ static int
   20.37      int *status,
   20.38      int options)
   20.39  {
   20.40 -    DECLARE_DOM0_OP;
   20.41 +    DECLARE_DOMCTL;
   20.42      int retval;
   20.43      struct timespec ts;
   20.44 -    cpumap_t cpumap;
   20.45 +    uint64_t cpumap;
   20.46  
   20.47      ts.tv_sec = 0;
   20.48      ts.tv_nsec = 10*1000*1000;
   20.49  
   20.50 -    op.cmd = DOM0_GETDOMAININFO;
   20.51 -    op.u.getdomaininfo.domain = domain;
   20.52 +    domctl.cmd = XEN_DOMCTL_getdomaininfo;
   20.53 +    domctl.domain = domain;
   20.54  
   20.55   retry:
   20.56 -    retval = do_dom0_op(xc_handle, &op);
   20.57 -    if ( retval || (op.u.getdomaininfo.domain != domain) )
   20.58 +    retval = do_domctl(xc_handle, &domctl);
   20.59 +    if ( retval || (domctl.domain != domain) )
   20.60      {
   20.61          IPRINTF("getdomaininfo failed\n");
   20.62          goto done;
   20.63      }
   20.64 -    *status = op.u.getdomaininfo.flags;
   20.65 +    *status = domctl.u.getdomaininfo.flags;
   20.66  
   20.67      if ( options & WNOHANG )
   20.68          goto done;
   20.69 @@ -447,13 +448,13 @@ static int
   20.70          goto done;
   20.71      }
   20.72  
   20.73 -    if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) )
   20.74 +    if ( !(domctl.u.getdomaininfo.flags & DOMFLAGS_PAUSED) )
   20.75      {
   20.76          nanosleep(&ts,NULL);
   20.77          goto retry;
   20.78      }
   20.79   done:
   20.80 -    if (get_online_cpumap(xc_handle, &op.u.getdomaininfo, &cpumap))
   20.81 +    if (get_online_cpumap(xc_handle, &domctl.u.getdomaininfo, &cpumap))
   20.82          IPRINTF("get_online_cpumap failed\n");
   20.83      if (online_cpumap != cpumap)
   20.84          online_vcpus_changed(cpumap);
   20.85 @@ -470,11 +471,11 @@ xc_ptrace(
   20.86      long eaddr,
   20.87      long edata)
   20.88  {
   20.89 -    DECLARE_DOM0_OP;
   20.90 +    DECLARE_DOMCTL;
   20.91      struct gdb_regs pt;
   20.92      long            retval = 0;
   20.93      unsigned long  *guest_va;
   20.94 -    cpumap_t        cpumap;
   20.95 +    uint64_t        cpumap;
   20.96      int             cpu, index;
   20.97      void           *addr = (char *)eaddr;
   20.98      void           *data = (char *)edata;
   20.99 @@ -535,7 +536,7 @@ xc_ptrace(
  20.100          SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
  20.101          if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
  20.102                                  &ctxt[cpu])))
  20.103 -            goto out_error_dom0;
  20.104 +            goto out_error_domctl;
  20.105          break;
  20.106  
  20.107      case PTRACE_SINGLESTEP:
  20.108 @@ -547,7 +548,7 @@ xc_ptrace(
  20.109          ctxt[cpu].user_regs.eflags |= PSL_T;
  20.110          if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
  20.111                                  &ctxt[cpu])))
  20.112 -            goto out_error_dom0;
  20.113 +            goto out_error_domctl;
  20.114          /* FALLTHROUGH */
  20.115  
  20.116      case PTRACE_CONT:
  20.117 @@ -566,22 +567,22 @@ xc_ptrace(
  20.118                      ctxt[cpu].user_regs.eflags &= ~PSL_T;
  20.119                      if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
  20.120                                                  cpu, &ctxt[cpu])))
  20.121 -                        goto out_error_dom0;
  20.122 +                        goto out_error_domctl;
  20.123                  }
  20.124              }
  20.125          }
  20.126          if ( request == PTRACE_DETACH )
  20.127          {
  20.128 -            op.cmd = DOM0_SETDEBUGGING;
  20.129 -            op.u.setdebugging.domain = current_domid;
  20.130 -            op.u.setdebugging.enable = 0;
  20.131 -            if ((retval = do_dom0_op(xc_handle, &op)))
  20.132 -                goto out_error_dom0;
  20.133 +            domctl.cmd = XEN_DOMCTL_setdebugging;
  20.134 +            domctl.domain = current_domid;
  20.135 +            domctl.u.setdebugging.enable = 0;
  20.136 +            if ((retval = do_domctl(xc_handle, &domctl)))
  20.137 +                goto out_error_domctl;
  20.138          }
  20.139          regs_valid = 0;
  20.140          if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ?
  20.141                                  current_domid : -current_domid)))
  20.142 -            goto out_error_dom0;
  20.143 +            goto out_error_domctl;
  20.144          break;
  20.145  
  20.146      case PTRACE_ATTACH:
  20.147 @@ -589,22 +590,22 @@ xc_ptrace(
  20.148          current_isfile = (int)edata;
  20.149          if (current_isfile)
  20.150              break;
  20.151 -        op.cmd = DOM0_GETDOMAININFO;
  20.152 -        op.u.getdomaininfo.domain = current_domid;
  20.153 -        retval = do_dom0_op(xc_handle, &op);
  20.154 -        if ( retval || (op.u.getdomaininfo.domain != current_domid) )
  20.155 -            goto out_error_dom0;
  20.156 -        if ( op.u.getdomaininfo.flags & DOMFLAGS_PAUSED )
  20.157 +        domctl.cmd = XEN_DOMCTL_getdomaininfo;
  20.158 +        domctl.domain = current_domid;
  20.159 +        retval = do_domctl(xc_handle, &domctl);
  20.160 +        if ( retval || (domctl.domain != current_domid) )
  20.161 +            goto out_error_domctl;
  20.162 +        if ( domctl.u.getdomaininfo.flags & DOMFLAGS_PAUSED )
  20.163              IPRINTF("domain currently paused\n");
  20.164          else if ((retval = xc_domain_pause(xc_handle, current_domid)))
  20.165 -            goto out_error_dom0;
  20.166 -        op.cmd = DOM0_SETDEBUGGING;
  20.167 -        op.u.setdebugging.domain = current_domid;
  20.168 -        op.u.setdebugging.enable = 1;
  20.169 -        if ((retval = do_dom0_op(xc_handle, &op)))
  20.170 -            goto out_error_dom0;
  20.171 +            goto out_error_domctl;
  20.172 +        domctl.cmd = XEN_DOMCTL_setdebugging;
  20.173 +        domctl.domain = current_domid;
  20.174 +        domctl.u.setdebugging.enable = 1;
  20.175 +        if ((retval = do_domctl(xc_handle, &domctl)))
  20.176 +            goto out_error_domctl;
  20.177  
  20.178 -        if (get_online_cpumap(xc_handle, &op.u.getdomaininfo, &cpumap))
  20.179 +        if (get_online_cpumap(xc_handle, &domctl.u.getdomaininfo, &cpumap))
  20.180              IPRINTF("get_online_cpumap failed\n");
  20.181          if (online_cpumap != cpumap)
  20.182              online_vcpus_changed(cpumap);
  20.183 @@ -625,8 +626,8 @@ xc_ptrace(
  20.184  
  20.185      return retval;
  20.186  
  20.187 - out_error_dom0:
  20.188 -    perror("dom0 op failed");
  20.189 + out_error_domctl:
  20.190 +    perror("domctl failed");
  20.191   out_error:
  20.192      errno = EINVAL;
  20.193      return retval;
    21.1 --- a/tools/libxc/xc_sedf.c	Fri Aug 25 10:39:24 2006 +0100
    21.2 +++ b/tools/libxc/xc_sedf.c	Fri Aug 25 18:39:10 2006 +0100
    21.3 @@ -10,37 +10,50 @@
    21.4  
    21.5  #include "xc_private.h"
    21.6  
    21.7 -int xc_sedf_domain_set(int xc_handle,
    21.8 -                          uint32_t domid, uint64_t period, uint64_t slice,uint64_t latency, uint16_t extratime,uint16_t weight)
    21.9 +int xc_sedf_domain_set(
   21.10 +    int xc_handle,
   21.11 +    uint32_t domid,
   21.12 +    uint64_t period,
   21.13 +    uint64_t slice,
   21.14 +    uint64_t latency,
   21.15 +    uint16_t extratime,
   21.16 +    uint16_t weight)
   21.17  {
   21.18 -    DECLARE_DOM0_OP;
   21.19 -    struct sedf_adjdom *p = &op.u.adjustdom.u.sedf;
   21.20 +    DECLARE_DOMCTL;
   21.21 +    struct xen_domctl_sched_sedf *p = &domctl.u.scheduler_op.u.sedf;
   21.22  
   21.23 -    op.cmd = DOM0_ADJUSTDOM;
   21.24 -    op.u.adjustdom.domain  = (domid_t)domid;
   21.25 -    op.u.adjustdom.sched_id = SCHED_SEDF;
   21.26 -    op.u.adjustdom.direction = SCHED_INFO_PUT;
   21.27 +    domctl.cmd = XEN_DOMCTL_scheduler_op;
   21.28 +    domctl.domain  = (domid_t)domid;
   21.29 +    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_SEDF;
   21.30 +    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
   21.31  
   21.32      p->period    = period;
   21.33      p->slice     = slice;
   21.34      p->latency   = latency;
   21.35      p->extratime = extratime;
   21.36      p->weight    = weight;
   21.37 -    return do_dom0_op(xc_handle, &op);
   21.38 +    return do_domctl(xc_handle, &domctl);
   21.39  }
   21.40  
   21.41 -int xc_sedf_domain_get(int xc_handle, uint32_t domid, uint64_t *period, uint64_t *slice, uint64_t* latency, uint16_t* extratime, uint16_t* weight)
   21.42 +int xc_sedf_domain_get(
   21.43 +    int xc_handle,
   21.44 +    uint32_t domid,
   21.45 +    uint64_t *period,
   21.46 +    uint64_t *slice,
   21.47 +    uint64_t *latency,
   21.48 +    uint16_t *extratime,
   21.49 +    uint16_t *weight)
   21.50  {
   21.51 -    DECLARE_DOM0_OP;
   21.52 +    DECLARE_DOMCTL;
   21.53      int ret;
   21.54 -    struct sedf_adjdom *p = &op.u.adjustdom.u.sedf;
   21.55 +    struct xen_domctl_sched_sedf *p = &domctl.u.scheduler_op.u.sedf;
   21.56  
   21.57 -    op.cmd = DOM0_ADJUSTDOM;
   21.58 -    op.u.adjustdom.domain = (domid_t)domid;
   21.59 -    op.u.adjustdom.sched_id = SCHED_SEDF;
   21.60 -    op.u.adjustdom.direction = SCHED_INFO_GET;
   21.61 +    domctl.cmd = XEN_DOMCTL_scheduler_op;
   21.62 +    domctl.domain = (domid_t)domid;
   21.63 +    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_SEDF;
   21.64 +    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
   21.65  
   21.66 -    ret = do_dom0_op(xc_handle, &op);
   21.67 +    ret = do_domctl(xc_handle, &domctl);
   21.68  
   21.69      *period    = p->period;
   21.70      *slice     = p->slice;
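
[Editor's note, illustrative sketch] The reflowed SEDF setter is otherwise unchanged: callers still pass the scheduling parameters positionally. A sketch with placeholder values (the numbers are purely illustrative, not recommendations):

    /* Sketch: apply SEDF parameters to a domain; all values are placeholders. */
    int set_sedf_params(int xc_handle, uint32_t domid)
    {
        return xc_sedf_domain_set(xc_handle, domid,
                                  20000000ULL,  /* period    */
                                  10000000ULL,  /* slice     */
                                  0ULL,         /* latency   */
                                  0,            /* extratime */
                                  0);           /* weight    */
    }
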
    22.1 --- a/tools/libxc/xc_tbuf.c	Fri Aug 25 10:39:24 2006 +0100
    22.2 +++ b/tools/libxc/xc_tbuf.c	Fri Aug 25 18:39:10 2006 +0100
    22.3 @@ -18,49 +18,49 @@
    22.4  
    22.5  static int tbuf_enable(int xc_handle, int enable)
    22.6  {
    22.7 -    DECLARE_DOM0_OP;
    22.8 +    DECLARE_SYSCTL;
    22.9  
   22.10 -    op.cmd = DOM0_TBUFCONTROL;
   22.11 -    op.interface_version = DOM0_INTERFACE_VERSION;
   22.12 +    sysctl.cmd = XEN_SYSCTL_tbuf_op;
   22.13 +    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
   22.14      if (enable)
   22.15 -        op.u.tbufcontrol.op  = DOM0_TBUF_ENABLE;
   22.16 +        sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_enable;
   22.17      else
   22.18 -        op.u.tbufcontrol.op  = DOM0_TBUF_DISABLE;
   22.19 +        sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_disable;
   22.20  
   22.21 -    return xc_dom0_op(xc_handle, &op);
   22.22 +    return xc_sysctl(xc_handle, &sysctl);
   22.23  }
   22.24  
   22.25  int xc_tbuf_set_size(int xc_handle, unsigned long size)
   22.26  {
   22.27 -    DECLARE_DOM0_OP;
   22.28 +    DECLARE_SYSCTL;
   22.29  
   22.30 -    op.cmd = DOM0_TBUFCONTROL;
   22.31 -    op.interface_version = DOM0_INTERFACE_VERSION;
   22.32 -    op.u.tbufcontrol.op  = DOM0_TBUF_SET_SIZE;
   22.33 -    op.u.tbufcontrol.size = size;
   22.34 +    sysctl.cmd = XEN_SYSCTL_tbuf_op;
   22.35 +    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
   22.36 +    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_size;
   22.37 +    sysctl.u.tbuf_op.size = size;
   22.38  
   22.39 -    return xc_dom0_op(xc_handle, &op);
   22.40 +    return xc_sysctl(xc_handle, &sysctl);
   22.41  }
   22.42  
   22.43  int xc_tbuf_get_size(int xc_handle, unsigned long *size)
   22.44  {
   22.45      int rc;
   22.46 -    DECLARE_DOM0_OP;
   22.47 +    DECLARE_SYSCTL;
   22.48  
   22.49 -    op.cmd = DOM0_TBUFCONTROL;
   22.50 -    op.interface_version = DOM0_INTERFACE_VERSION;
   22.51 -    op.u.tbufcontrol.op  = DOM0_TBUF_GET_INFO;
   22.52 +    sysctl.cmd = XEN_SYSCTL_tbuf_op;
   22.53 +    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
   22.54 +    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_get_info;
   22.55  
   22.56 -    rc = xc_dom0_op(xc_handle, &op);
   22.57 +    rc = xc_sysctl(xc_handle, &sysctl);
   22.58      if (rc == 0)
   22.59 -        *size = op.u.tbufcontrol.size;
   22.60 +        *size = sysctl.u.tbuf_op.size;
   22.61      return rc;
   22.62  }
   22.63  
   22.64  int xc_tbuf_enable(int xc_handle, size_t cnt, unsigned long *mfn,
   22.65                     unsigned long *size)
   22.66  {
   22.67 -    DECLARE_DOM0_OP;
   22.68 +    DECLARE_SYSCTL;
   22.69      int rc;
   22.70  
   22.71      /*
   22.72 @@ -73,15 +73,15 @@ int xc_tbuf_enable(int xc_handle, size_t
   22.73      if ( tbuf_enable(xc_handle, 1) != 0 )
   22.74          return -1;
   22.75  
   22.76 -    op.cmd = DOM0_TBUFCONTROL;
   22.77 -    op.interface_version = DOM0_INTERFACE_VERSION;
   22.78 -    op.u.tbufcontrol.op  = DOM0_TBUF_GET_INFO;
   22.79 +    sysctl.cmd = XEN_SYSCTL_tbuf_op;
   22.80 +    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
   22.81 +    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_get_info;
   22.82  
   22.83 -    rc = xc_dom0_op(xc_handle, &op);
   22.84 +    rc = xc_sysctl(xc_handle, &sysctl);
   22.85      if ( rc == 0 )
   22.86      {
   22.87 -        *size = op.u.tbufcontrol.size;
   22.88 -        *mfn = op.u.tbufcontrol.buffer_mfn;
   22.89 +        *size = sysctl.u.tbuf_op.size;
   22.90 +        *mfn = sysctl.u.tbuf_op.buffer_mfn;
   22.91      }
   22.92  
   22.93      return 0;
   22.94 @@ -94,25 +94,39 @@ int xc_tbuf_disable(int xc_handle)
   22.95  
   22.96  int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask)
   22.97  {
   22.98 -    DECLARE_DOM0_OP;
   22.99 +    DECLARE_SYSCTL;
  22.100 +    int ret = -1;
  22.101 +
  22.102 +    sysctl.cmd = XEN_SYSCTL_tbuf_op;
  22.103 +    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
  22.104 +    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_cpu_mask;
  22.105 +
  22.106 +    set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, (uint8_t *)&mask);
  22.107 +    sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(mask) * 8;
  22.108  
  22.109 -    op.cmd = DOM0_TBUFCONTROL;
  22.110 -    op.interface_version = DOM0_INTERFACE_VERSION;
  22.111 -    op.u.tbufcontrol.op  = DOM0_TBUF_SET_CPU_MASK;
  22.112 -    op.u.tbufcontrol.cpu_mask = mask;
  22.113 +    if ( mlock(&mask, sizeof(mask)) != 0 )
  22.114 +    {
  22.115 +        PERROR("Could not lock memory for Xen hypercall");
  22.116 +        goto out;
  22.117 +    }
  22.118  
  22.119 -    return do_dom0_op(xc_handle, &op);
  22.120 +    ret = do_sysctl(xc_handle, &sysctl);
  22.121 +
  22.122 +    safe_munlock(&mask, sizeof(mask));
  22.123 +
  22.124 + out:
  22.125 +    return ret;
  22.126  }
  22.127  
  22.128  int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask)
  22.129  {
  22.130 -    DECLARE_DOM0_OP;
  22.131 +    DECLARE_SYSCTL;
  22.132  
  22.133 -    op.cmd = DOM0_TBUFCONTROL;
  22.134 -    op.interface_version = DOM0_INTERFACE_VERSION;
  22.135 -    op.u.tbufcontrol.op  = DOM0_TBUF_SET_EVT_MASK;
  22.136 -    op.u.tbufcontrol.evt_mask = mask;
  22.137 +    sysctl.cmd = XEN_SYSCTL_tbuf_op;
  22.138 +    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
  22.139 +    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_evt_mask;
  22.140 +    sysctl.u.tbuf_op.evt_mask = mask;
  22.141  
  22.142 -    return do_dom0_op(xc_handle, &op);
  22.143 +    return do_sysctl(xc_handle, &sysctl);
  22.144  }
  22.145  
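
[Editor's note, illustrative sketch] From the tools' side, the tbuf helpers above are typically used as a pair: enable tracing, then read back where Xen placed the buffers. A minimal sketch (the cnt argument is left at 0 here purely for illustration, and the helper name start_tracing is not part of the patch):

    /* Sketch: enable trace buffers and report the MFN/size returned by the
     * XEN_SYSCTL_TBUFOP_get_info path exercised inside xc_tbuf_enable(). */
    #include <stdio.h>
    #include "xenctrl.h"

    int start_tracing(int xc_handle)
    {
        unsigned long mfn, size;

        if ( xc_tbuf_enable(xc_handle, 0, &mfn, &size) != 0 )
            return -1;

        printf("trace buffers at mfn %#lx, size %lu\n", mfn, size);
        return 0;
    }
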
    23.1 --- a/tools/libxc/xenctrl.h	Fri Aug 25 10:39:24 2006 +0100
    23.2 +++ b/tools/libxc/xenctrl.h	Fri Aug 25 18:39:10 2006 +0100
    23.3 @@ -13,11 +13,11 @@
    23.4  #include <stdint.h>
    23.5  #include <sys/ptrace.h>
    23.6  #include <xen/xen.h>
    23.7 -#include <xen/dom0_ops.h>
    23.8 +#include <xen/domctl.h>
    23.9 +#include <xen/sysctl.h>
   23.10  #include <xen/version.h>
   23.11  #include <xen/event_channel.h>
   23.12  #include <xen/sched.h>
   23.13 -#include <xen/sched_ctl.h>
   23.14  #include <xen/memory.h>
   23.15  #include <xen/acm.h>
   23.16  #include <xen/acm_ops.h>
   23.17 @@ -139,7 +139,7 @@ typedef struct {
   23.18      xen_domain_handle_t handle;
   23.19  } xc_dominfo_t;
   23.20  
   23.21 -typedef dom0_getdomaininfo_t xc_domaininfo_t;
   23.22 +typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
   23.23  int xc_domain_create(int xc_handle,
   23.24                       uint32_t ssidref,
   23.25                       xen_domain_handle_t handle,
   23.26 @@ -231,7 +231,11 @@ int xc_domain_shutdown(int xc_handle,
   23.27  int xc_vcpu_setaffinity(int xc_handle,
   23.28                          uint32_t domid,
   23.29                          int vcpu,
   23.30 -                        cpumap_t cpumap);
   23.31 +                        uint64_t cpumap);
   23.32 +int xc_vcpu_getaffinity(int xc_handle,
   23.33 +                        uint32_t domid,
   23.34 +                        int vcpu,
   23.35 +                        uint64_t *cpumap);
   23.36  
   23.37  /**
   23.38   * This function will return information about one or more domains. It is
   23.39 @@ -301,7 +305,7 @@ int xc_vcpu_getcontext(int xc_handle,
   23.40                                 uint32_t vcpu,
   23.41                                 vcpu_guest_context_t *ctxt);
   23.42  
   23.43 -typedef dom0_getvcpuinfo_t xc_vcpuinfo_t;
   23.44 +typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
   23.45  int xc_vcpu_getinfo(int xc_handle,
   23.46                      uint32_t domid,
   23.47                      uint32_t vcpu,
   23.48 @@ -317,7 +321,7 @@ long long xc_domain_get_cpu_usage(int xc
   23.49  int xc_domain_sethandle(int xc_handle, uint32_t domid,
   23.50                          xen_domain_handle_t handle);
   23.51  
   23.52 -typedef dom0_shadow_control_stats_t xc_shadow_control_stats_t;
   23.53 +typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
   23.54  int xc_shadow_control(int xc_handle,
   23.55                        uint32_t domid,
   23.56                        unsigned int sop,
   23.57 @@ -325,7 +329,7 @@ int xc_shadow_control(int xc_handle,
   23.58                        unsigned long pages,
   23.59                        unsigned long *mb,
   23.60                        uint32_t mode,
   23.61 -                      xc_shadow_control_stats_t *stats);
   23.62 +                      xc_shadow_op_stats_t *stats);
   23.63  
   23.64  int xc_sedf_domain_set(int xc_handle,
   23.65                         uint32_t domid,
   23.66 @@ -341,11 +345,11 @@ int xc_sedf_domain_get(int xc_handle,
   23.67  
   23.68  int xc_sched_credit_domain_set(int xc_handle,
   23.69                                 uint32_t domid,
   23.70 -                               struct sched_credit_adjdom *sdom);
   23.71 +                               struct xen_domctl_sched_credit *sdom);
   23.72  
   23.73  int xc_sched_credit_domain_get(int xc_handle,
   23.74                                 uint32_t domid,
   23.75 -                               struct sched_credit_adjdom *sdom);
   23.76 +                               struct xen_domctl_sched_credit *sdom);
   23.77  
   23.78  /*
   23.79   * EVENT CHANNEL FUNCTIONS
   23.80 @@ -377,7 +381,7 @@ int xc_readconsolering(int xc_handle,
   23.81                         unsigned int *pnr_chars,
   23.82                         int clear);
   23.83  
   23.84 -typedef dom0_physinfo_t xc_physinfo_t;
   23.85 +typedef xen_sysctl_physinfo_t xc_physinfo_t;
   23.86  int xc_physinfo(int xc_handle,
   23.87                  xc_physinfo_t *info);
   23.88  
   23.89 @@ -438,8 +442,8 @@ int xc_domain_iomem_permission(int xc_ha
   23.90  unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
   23.91                                      unsigned long mfn);
   23.92  
   23.93 -typedef dom0_perfc_desc_t xc_perfc_desc_t;
   23.94 -typedef dom0_perfc_val_t xc_perfc_val_t;
   23.95 +typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
   23.96 +typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
   23.97  /* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
   23.98     arrays. */
   23.99  int xc_perfc_control(int xc_handle,
  23.100 @@ -561,8 +565,8 @@ int xc_tbuf_set_cpu_mask(int xc_handle, 
  23.101  
  23.102  int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);
  23.103  
  23.104 -/* Execute a privileged dom0 operation. */
  23.105 -int xc_dom0_op(int xc_handle, dom0_op_t *op);
  23.106 +int xc_domctl(int xc_handle, struct xen_domctl *domctl);
  23.107 +int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);
  23.108  
  23.109  int xc_version(int xc_handle, int cmd, void *arg);
  23.110  
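
[Editor's note, illustrative sketch] The header also gains xc_vcpu_getaffinity() alongside the existing setter; both now take a plain uint64_t bitmap instead of cpumap_t. A sketch of reading an affinity mask back (the 64-bit loop bound follows from the uint64_t type; show_affinity is an illustrative name):

    /* Sketch: print the CPUs present in a VCPU's affinity bitmap using the
     * new xc_vcpu_getaffinity() declaration. */
    #include <stdio.h>
    #include <stdint.h>
    #include "xenctrl.h"

    int show_affinity(int xc_handle, uint32_t domid, int vcpu)
    {
        uint64_t cpumap;
        int i;

        if ( xc_vcpu_getaffinity(xc_handle, domid, vcpu, &cpumap) != 0 )
            return -1;

        for ( i = 0; i < 64; i++ )
            if ( cpumap & (1ULL << i) )
                printf("cpu%d ", i);
        printf("\n");
        return 0;
    }
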
    24.1 --- a/tools/libxc/xg_private.h	Fri Aug 25 10:39:24 2006 +0100
    24.2 +++ b/tools/libxc/xg_private.h	Fri Aug 25 18:39:10 2006 +0100
    24.3 @@ -19,15 +19,6 @@
    24.4  #include <xen/memory.h>
    24.5  #include <xen/elfnote.h>
    24.6  
    24.7 -/* valgrind cannot see when a hypercall has filled in some values.  For this
    24.8 -   reason, we must zero the dom0_op_t instance before a call, if using
    24.9 -   valgrind.  */
   24.10 -#ifdef VALGRIND
   24.11 -#define DECLARE_DOM0_OP dom0_op_t op = { 0 }
   24.12 -#else
   24.13 -#define DECLARE_DOM0_OP dom0_op_t op
   24.14 -#endif
   24.15 -
   24.16  #ifndef ELFSIZE
   24.17  #include <limits.h>
   24.18  #if UINT_MAX == ULONG_MAX
    25.1 --- a/tools/misc/xenperf.c	Fri Aug 25 10:39:24 2006 +0100
    25.2 +++ b/tools/misc/xenperf.c	Fri Aug 25 18:39:10 2006 +0100
    25.3 @@ -64,7 +64,7 @@ int main(int argc, char *argv[])
    25.4      
    25.5      if ( reset )
    25.6      {
    25.7 -        if ( xc_perfc_control(xc_handle, DOM0_PERFCCONTROL_OP_RESET,
    25.8 +        if ( xc_perfc_control(xc_handle, XEN_SYSCTL_PERFCOP_reset,
    25.9                                NULL, NULL, NULL, NULL) != 0 )
   25.10          {
   25.11              fprintf(stderr, "Error reseting performance counters: %d (%s)\n",
   25.12 @@ -75,7 +75,7 @@ int main(int argc, char *argv[])
   25.13          return 0;
   25.14      }
   25.15  
   25.16 -	if ( xc_perfc_control(xc_handle, DOM0_PERFCCONTROL_OP_QUERY,
   25.17 +	if ( xc_perfc_control(xc_handle, XEN_SYSCTL_PERFCOP_query,
   25.18  						  NULL, NULL, &num_desc, &num_val) != 0 )
   25.19          {
   25.20              fprintf(stderr, "Error getting number of perf counters: %d (%s)\n",
   25.21 @@ -96,7 +96,7 @@ int main(int argc, char *argv[])
   25.22          exit(-1);
   25.23      }
   25.24  
   25.25 -    if ( xc_perfc_control(xc_handle, DOM0_PERFCCONTROL_OP_QUERY,
   25.26 +    if ( xc_perfc_control(xc_handle, XEN_SYSCTL_PERFCOP_query,
   25.27  						  pcd, pcv, NULL, NULL) != 0 )
   25.28      {
   25.29          fprintf(stderr, "Error getting perf counter: %d (%s)\n",
    26.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Fri Aug 25 10:39:24 2006 +0100
    26.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Fri Aug 25 18:39:10 2006 +0100
    26.3 @@ -141,7 +141,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
    26.4  {
    26.5      uint32_t dom;
    26.6      int vcpu = 0, i;
    26.7 -    cpumap_t cpumap = ~0ULL;
    26.8 +    uint64_t  cpumap = ~0ULL;
    26.9      PyObject *cpulist = NULL;
   26.10  
   26.11      static char *kwd_list[] = { "dom", "vcpu", "cpumap", NULL };
   26.12 @@ -154,7 +154,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
   26.13      {
   26.14          cpumap = 0ULL;
   26.15          for ( i = 0; i < PyList_Size(cpulist); i++ ) 
   26.16 -            cpumap |= (cpumap_t)1 << PyInt_AsLong(PyList_GetItem(cpulist, i));
   26.17 +            cpumap |= (uint64_t)1 << PyInt_AsLong(PyList_GetItem(cpulist, i));
   26.18      }
   26.19    
   26.20      if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
   26.21 @@ -289,7 +289,7 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
   26.22      uint32_t dom, vcpu = 0;
   26.23      xc_vcpuinfo_t info;
   26.24      int rc, i;
   26.25 -    cpumap_t cpumap;
   26.26 +    uint64_t cpumap;
   26.27  
   26.28      static char *kwd_list[] = { "dom", "vcpu", NULL };
   26.29      
   26.30 @@ -300,6 +300,9 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
   26.31      rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
   26.32      if ( rc < 0 )
   26.33          return PyErr_SetFromErrno(xc_error);
   26.34 +    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap);
   26.35 +    if ( rc < 0 )
   26.36 +        return PyErr_SetFromErrno(xc_error);
   26.37  
   26.38      info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
   26.39                                "online",   info.online,
   26.40 @@ -308,7 +311,6 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
   26.41                                "cpu_time", info.cpu_time,
   26.42                                "cpu",      info.cpu);
   26.43  
   26.44 -    cpumap = info.cpumap;
   26.45      cpulist = PyList_New(0);
   26.46      for ( i = 0; cpumap != 0; i++ )
   26.47      {
   26.48 @@ -632,11 +634,11 @@ static PyObject *pyxc_shadow_mem_control
   26.49          return NULL;
   26.50      
   26.51      if ( mbarg < 0 ) 
   26.52 -        op = DOM0_SHADOW_CONTROL_OP_GET_ALLOCATION;
   26.53 +        op = XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION;
   26.54      else 
   26.55      {
   26.56          mb = mbarg;
   26.57 -        op = DOM0_SHADOW_CONTROL_OP_SET_ALLOCATION;
   26.58 +        op = XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION;
   26.59      }
   26.60      if ( xc_shadow_control(xc->xc_handle, dom, op, NULL, 0, &mb, 0, NULL) < 0 )
   26.61          return PyErr_SetFromErrno(xc_error);
   26.62 @@ -654,7 +656,7 @@ static PyObject *pyxc_sched_credit_domai
   26.63      uint16_t cap;
   26.64      static char *kwd_list[] = { "dom", "weight", "cap", NULL };
   26.65      static char kwd_type[] = "I|HH";
   26.66 -    struct sched_credit_adjdom sdom;
   26.67 +    struct xen_domctl_sched_credit sdom;
   26.68      
   26.69      weight = 0;
   26.70      cap = (uint16_t)~0U;
   26.71 @@ -675,7 +677,7 @@ static PyObject *pyxc_sched_credit_domai
   26.72  static PyObject *pyxc_sched_credit_domain_get(XcObject *self, PyObject *args)
   26.73  {
   26.74      uint32_t domid;
   26.75 -    struct sched_credit_adjdom sdom;
   26.76 +    struct xen_domctl_sched_credit sdom;
   26.77      
   26.78      if( !PyArg_ParseTuple(args, "I", &domid) )
   26.79          return NULL;
    27.1 --- a/tools/xenmon/setmask.c	Fri Aug 25 10:39:24 2006 +0100
    27.2 +++ b/tools/xenmon/setmask.c	Fri Aug 25 18:39:10 2006 +0100
    27.3 @@ -40,15 +40,14 @@ typedef struct { int counter; } atomic_t
    27.4  
    27.5  int main(int argc, char * argv[])
    27.6  {
    27.7 -
    27.8 -    dom0_op_t op; 
    27.9 +    struct xen_sysctl sysctl;
   27.10      int ret;
   27.11  
   27.12      int xc_handle = xc_interface_open();
   27.13 -    op.cmd = DOM0_TBUFCONTROL;
   27.14 -    op.interface_version = DOM0_INTERFACE_VERSION;
   27.15 -    op.u.tbufcontrol.op  = DOM0_TBUF_GET_INFO;
   27.16 -    ret = xc_dom0_op(xc_handle, &op);
   27.17 +    sysctl.cmd = XEN_SYSCTL_tbuf_op;
   27.18 +    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
   27.19 +    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_get_info;
   27.20 +    ret = xc_sysctl(xc_handle, &sysctl);
   27.21      if ( ret != 0 )
   27.22      {
   27.23          perror("Failure to get event mask from Xen");
   27.24 @@ -56,26 +55,26 @@ int main(int argc, char * argv[])
   27.25      }
   27.26      else
   27.27      {
   27.28 -        printf("Current event mask: 0x%.8x\n", op.u.tbufcontrol.evt_mask);
   27.29 +        printf("Current event mask: 0x%.8x\n", sysctl.u.tbuf_op.evt_mask);
   27.30      }
   27.31  
   27.32 -    op.cmd = DOM0_TBUFCONTROL;
   27.33 -    op.interface_version = DOM0_INTERFACE_VERSION;
   27.34 -    op.u.tbufcontrol.op  = DOM0_TBUF_SET_EVT_MASK;
   27.35 -    op.u.tbufcontrol.evt_mask = XENMON;
   27.36 +    sysctl.cmd = XEN_SYSCTL_tbuf_op;
   27.37 +    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
   27.38 +    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_evt_mask;
   27.39 +    sysctl.u.tbuf_op.evt_mask = XENMON;
   27.40  
   27.41 -    ret = xc_dom0_op(xc_handle, &op);
   27.42 -    printf("Setting mask to 0x%.8x\n", op.u.tbufcontrol.evt_mask);
   27.43 +    ret = xc_sysctl(xc_handle, &sysctl);
   27.44 +    printf("Setting mask to 0x%.8x\n", sysctl.u.tbuf_op.evt_mask);
   27.45      if ( ret != 0 )
   27.46      {
   27.47          perror("Failure to get scheduler ID from Xen");
   27.48          exit(1);
   27.49      }
   27.50  
   27.51 -    op.cmd = DOM0_TBUFCONTROL;
   27.52 -    op.interface_version = DOM0_INTERFACE_VERSION;
   27.53 -    op.u.tbufcontrol.op  = DOM0_TBUF_GET_INFO;
   27.54 -    ret = xc_dom0_op(xc_handle, &op);
   27.55 +    sysctl.cmd = XEN_SYSCTL_tbuf_op;
   27.56 +    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
   27.57 +    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_get_info;
   27.58 +    ret = xc_sysctl(xc_handle, &sysctl);
   27.59      if ( ret != 0 )
   27.60      {
   27.61          perror("Failure to get event mask from Xen");
   27.62 @@ -83,7 +82,7 @@ int main(int argc, char * argv[])
   27.63      }
   27.64      else
   27.65      {
   27.66 -        printf("Current event mask: 0x%.8x\n", op.u.tbufcontrol.evt_mask);
   27.67 +        printf("Current event mask: 0x%.8x\n", sysctl.u.tbuf_op.evt_mask);
   27.68      }
   27.69      xc_interface_close(xc_handle);
   27.70      return 0;
    28.1 --- a/tools/xenstat/libxenstat/src/xenstat.c	Fri Aug 25 10:39:24 2006 +0100
    28.2 +++ b/tools/xenstat/libxenstat/src/xenstat.c	Fri Aug 25 18:39:10 2006 +0100
    28.3 @@ -210,8 +210,8 @@ xenstat_node *xenstat_get_node(xenstat_h
    28.4  {
    28.5  #define DOMAIN_CHUNK_SIZE 256
    28.6  	xenstat_node *node;
    28.7 -	dom0_physinfo_t physinfo;
    28.8 -	dom0_getdomaininfo_t domaininfo[DOMAIN_CHUNK_SIZE];
    28.9 +	xc_physinfo_t physinfo;
   28.10 +	xc_domaininfo_t domaininfo[DOMAIN_CHUNK_SIZE];
   28.11  	unsigned int new_domains;
   28.12  	unsigned int i;
   28.13  
   28.14 @@ -530,7 +530,7 @@ static int xenstat_collect_vcpus(xenstat
   28.15  	
   28.16  		for (vcpu = 0; vcpu < node->domains[i].num_vcpus; vcpu++) {
   28.17  			/* FIXME: need to be using a more efficient mechanism*/
   28.18 -			dom0_getvcpuinfo_t info;
   28.19 +			xc_vcpuinfo_t info;
   28.20  
   28.21  			if (xc_vcpu_getinfo(node->handle->xc_handle,
   28.22  					    node->domains[i].id, vcpu, &info) != 0) {
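
[Editor's note, illustrative sketch] xenstat only needs its local types switched to the libxc typedefs; the call sites themselves stay the same. A sketch of that consumer pattern (vcpu_time is an illustrative name, and the uint64_t return type is an assumption matching the cpu_time usage above):

    /* Sketch: read one VCPU's accumulated time through xc_vcpu_getinfo(),
     * whose xc_vcpuinfo_t argument is now xen_domctl_getvcpuinfo_t. */
    #include <stdint.h>
    #include "xenctrl.h"

    static uint64_t vcpu_time(int xc_handle, uint32_t domid, uint32_t vcpu)
    {
        xc_vcpuinfo_t info;

        if ( xc_vcpu_getinfo(xc_handle, domid, vcpu, &info) != 0 )
            return 0;

        return info.cpu_time;
    }
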
    29.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Fri Aug 25 10:39:24 2006 +0100
    29.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Fri Aug 25 18:39:10 2006 +0100
    29.3 @@ -10,14 +10,14 @@
    29.4  #include <xen/types.h>
    29.5  #include <xen/lib.h>
    29.6  #include <xen/mm.h>
    29.7 -#include <public/dom0_ops.h>
    29.8 +#include <public/domctl.h>
    29.9 +#include <public/sysctl.h>
   29.10  #include <xen/sched.h>
   29.11  #include <xen/event.h>
   29.12  #include <asm/pdb.h>
   29.13  #include <xen/trace.h>
   29.14  #include <xen/console.h>
   29.15  #include <xen/guest_access.h>
   29.16 -#include <public/sched_ctl.h>
   29.17  #include <asm/vmx.h>
   29.18  #include <asm/dom_fw.h>
   29.19  #include <xen/iocap.h>
   29.20 @@ -25,7 +25,8 @@
   29.21  void build_physmap_table(struct domain *d);
   29.22  
   29.23  extern unsigned long total_pages;
   29.24 -long arch_do_dom0_op(dom0_op_t *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
   29.25 +
   29.26 +long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
   29.27  {
   29.28      long ret = 0;
   29.29  
   29.30 @@ -34,10 +35,10 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
   29.31  
   29.32      switch ( op->cmd )
   29.33      {
   29.34 -    case DOM0_GETMEMLIST:
   29.35 +    case XEN_DOMCTL_getmemlist:
   29.36      {
   29.37          unsigned long i;
   29.38 -        struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
   29.39 +        struct domain *d = find_domain_by_id(op->domain);
   29.40          unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
   29.41          unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
   29.42          unsigned long mfn;
   29.43 @@ -63,39 +64,17 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
   29.44          }
   29.45  
   29.46          op->u.getmemlist.num_pfns = i;
   29.47 -        if (copy_to_guest(u_dom0_op, op, 1))
   29.48 +        if (copy_to_guest(u_domctl, op, 1))
   29.49              ret = -EFAULT;
   29.50  
   29.51          put_domain(d);
   29.52      }
   29.53      break;
   29.54  
   29.55 -    case DOM0_PHYSINFO:
   29.56 +    case XEN_DOMCTL_arch_setup:
   29.57      {
   29.58 -        dom0_physinfo_t *pi = &op->u.physinfo;
   29.59 -
   29.60 -        pi->threads_per_core =
   29.61 -            cpus_weight(cpu_sibling_map[0]);
   29.62 -        pi->cores_per_socket =
   29.63 -            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
   29.64 -        pi->sockets_per_node = 
   29.65 -            num_online_cpus() / cpus_weight(cpu_core_map[0]);
   29.66 -        pi->nr_nodes         = 1;
   29.67 -        pi->total_pages      = total_pages; 
   29.68 -        pi->free_pages       = avail_domheap_pages();
   29.69 -        pi->cpu_khz          = local_cpu_data->proc_freq / 1000;
   29.70 -        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
   29.71 -        //memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
   29.72 -        ret = 0;
   29.73 -        if ( copy_to_guest(u_dom0_op, op, 1) )
   29.74 -            ret = -EFAULT;
   29.75 -    }
   29.76 -    break;
   29.77 -
   29.78 -    case DOM0_DOMAIN_SETUP:
   29.79 -    {
   29.80 -        dom0_domain_setup_t *ds = &op->u.domain_setup;
   29.81 -        struct domain *d = find_domain_by_id(ds->domain);
   29.82 +        xen_domctl_arch_setup_t *ds = &op->u.arch_setup;
   29.83 +        struct domain *d = find_domain_by_id(op->domain);
   29.84  
   29.85          if ( d == NULL) {
   29.86              ret = -EINVAL;
   29.87 @@ -112,7 +91,7 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
   29.88              ds->xsi_va = d->arch.shared_info_va;
   29.89              ds->hypercall_imm = d->arch.breakimm;
   29.90              /* Copy back.  */
   29.91 -            if ( copy_to_guest(u_dom0_op, op, 1) )
   29.92 +            if ( copy_to_guest(u_domctl, op, 1) )
   29.93                  ret = -EFAULT;
   29.94          }
   29.95          else {
   29.96 @@ -152,21 +131,21 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
   29.97      }
   29.98      break;
   29.99  
  29.100 -    case DOM0_SHADOW_CONTROL:
  29.101 +    case XEN_DOMCTL_shadow_op:
  29.102      {
  29.103          struct domain *d; 
  29.104          ret = -ESRCH;
  29.105 -        d = find_domain_by_id(op->u.shadow_control.domain);
  29.106 +        d = find_domain_by_id(op->domain);
  29.107          if ( d != NULL )
  29.108          {
  29.109 -            ret = shadow_mode_control(d, &op->u.shadow_control);
  29.110 +            ret = shadow_mode_control(d, &op->u.shadow_op);
  29.111              put_domain(d);
  29.112 -            copy_to_guest(u_dom0_op, op, 1);
  29.113 +            copy_to_guest(u_domctl, op, 1);
  29.114          } 
  29.115      }
  29.116      break;
  29.117  
  29.118 -    case DOM0_IOPORT_PERMISSION:
  29.119 +    case XEN_DOMCTL_ioport_permission:
  29.120      {
  29.121          struct domain *d;
  29.122          unsigned int fp = op->u.ioport_permission.first_port;
  29.123 @@ -174,7 +153,7 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
  29.124          unsigned int lp = fp + np - 1;
  29.125  
  29.126          ret = -ESRCH;
  29.127 -        d = find_domain_by_id(op->u.ioport_permission.domain);
  29.128 +        d = find_domain_by_id(op->domain);
  29.129          if (unlikely(d == NULL))
  29.130              break;
  29.131  
  29.132 @@ -191,7 +170,47 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
  29.133      }
  29.134      break;
  29.135      default:
  29.136 -        printf("arch_do_dom0_op: unrecognized dom0 op: %d!!!\n",op->cmd);
  29.137 +        printf("arch_do_domctl: unrecognized domctl: %d!!!\n",op->cmd);
  29.138 +        ret = -ENOSYS;
  29.139 +
  29.140 +    }
  29.141 +
  29.142 +    return ret;
  29.143 +}
  29.144 +
  29.145 +long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
  29.146 +{
  29.147 +    long ret = 0;
  29.148 +
  29.149 +    if ( !IS_PRIV(current->domain) )
  29.150 +        return -EPERM;
  29.151 +
  29.152 +    switch ( op->cmd )
  29.153 +    {
  29.154 +    case XEN_SYSCTL_physinfo:
  29.155 +    {
  29.156 +        xen_sysctl_physinfo_t *pi = &op->u.physinfo;
  29.157 +
  29.158 +        pi->threads_per_core =
  29.159 +            cpus_weight(cpu_sibling_map[0]);
  29.160 +        pi->cores_per_socket =
  29.161 +            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
  29.162 +        pi->sockets_per_node = 
  29.163 +            num_online_cpus() / cpus_weight(cpu_core_map[0]);
  29.164 +        pi->nr_nodes         = 1;
  29.165 +        pi->total_pages      = total_pages; 
  29.166 +        pi->free_pages       = avail_domheap_pages();
  29.167 +        pi->cpu_khz          = local_cpu_data->proc_freq / 1000;
  29.168 +        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
  29.169 +        //memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
  29.170 +        ret = 0;
  29.171 +        if ( copy_to_guest(u_sysctl, op, 1) )
  29.172 +            ret = -EFAULT;
  29.173 +    }
  29.174 +    break;
  29.175 +
  29.176 +    default:
  29.177 +        printf("arch_do_sysctl: unrecognized sysctl: %d!!!\n",op->cmd);
  29.178          ret = -ENOSYS;
  29.179  
  29.180      }
    30.1 --- a/xen/arch/ia64/xen/domain.c	Fri Aug 25 10:39:24 2006 +0100
    30.2 +++ b/xen/arch/ia64/xen/domain.c	Fri Aug 25 18:39:10 2006 +0100
    30.3 @@ -591,7 +591,7 @@ domain_set_shared_info_va (unsigned long
    30.4  /* Transfer and clear the shadow bitmap in 1kB chunks for L1 cache. */
    30.5  #define SHADOW_COPY_CHUNK (1024 / sizeof (unsigned long))
    30.6  
    30.7 -int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
    30.8 +int shadow_mode_control(struct domain *d, xen_domctl_shadow_ops_t *sc)
    30.9  {
   30.10  	unsigned int op = sc->op;
   30.11  	int          rc = 0;
   30.12 @@ -607,7 +607,7 @@ int shadow_mode_control(struct domain *d
   30.13  
   30.14  	switch (op)
   30.15  	{
   30.16 -	case DOM0_SHADOW_CONTROL_OP_OFF:
   30.17 +	case XEN_DOMCTL_SHADOW_OP_OFF:
   30.18  		if (shadow_mode_enabled (d)) {
   30.19  			u64 *bm = d->arch.shadow_bitmap;
   30.20  
   30.21 @@ -621,12 +621,12 @@ int shadow_mode_control(struct domain *d
   30.22  		}
   30.23  		break;
   30.24  
   30.25 -	case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
   30.26 -	case DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE:
   30.27 +	case XEN_DOMCTL_SHADOW_OP_ENABLE_TEST:
   30.28 +	case XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE:
   30.29  		rc = -EINVAL;
   30.30  		break;
   30.31  
   30.32 -	case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
   30.33 +	case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
   30.34  		if (shadow_mode_enabled(d)) {
   30.35  			rc = -EINVAL;
   30.36  			break;
   30.37 @@ -653,7 +653,7 @@ int shadow_mode_control(struct domain *d
   30.38  		}
   30.39  		break;
   30.40  
   30.41 -	case DOM0_SHADOW_CONTROL_OP_CLEAN:
   30.42 +	case XEN_DOMCTL_SHADOW_OP_CLEAN:
   30.43  	  {
   30.44  		int nbr_longs;
   30.45  
   30.46 @@ -692,7 +692,7 @@ int shadow_mode_control(struct domain *d
   30.47  		break;
   30.48  	  }
   30.49  
   30.50 -	case DOM0_SHADOW_CONTROL_OP_PEEK:
   30.51 +	case XEN_DOMCTL_SHADOW_OP_PEEK:
   30.52  	{
   30.53  		unsigned long size;
   30.54  
    31.1 --- a/xen/arch/ia64/xen/hypercall.c	Fri Aug 25 10:39:24 2006 +0100
    31.2 +++ b/xen/arch/ia64/xen/hypercall.c	Fri Aug 25 18:39:10 2006 +0100
    31.3 @@ -18,7 +18,8 @@
    31.4  
    31.5  #include <asm/vcpu.h>
    31.6  #include <asm/dom_fw.h>
    31.7 -#include <public/dom0_ops.h>
    31.8 +#include <public/domctl.h>
    31.9 +#include <public/sysctl.h>
   31.10  #include <public/event_channel.h>
   31.11  #include <public/memory.h>
   31.12  #include <public/sched.h>
   31.13 @@ -43,7 +44,7 @@ hypercall_t ia64_hypercall_table[] =
   31.14  	(hypercall_t)do_ni_hypercall,		/* do_set_callbacks */
   31.15  	(hypercall_t)do_ni_hypercall,		/* do_fpu_taskswitch */		/*  5 */
   31.16  	(hypercall_t)do_sched_op_compat,
   31.17 -	(hypercall_t)do_dom0_op,
   31.18 +	(hypercall_t)do_ni_hypercall,
   31.19  	(hypercall_t)do_ni_hypercall,		/* do_set_debugreg */
   31.20  	(hypercall_t)do_ni_hypercall,		/* do_get_debugreg */
   31.21  	(hypercall_t)do_ni_hypercall,		/* do_update_descriptor */	/* 10 */
   31.22 @@ -71,8 +72,8 @@ hypercall_t ia64_hypercall_table[] =
   31.23  	(hypercall_t)do_event_channel_op,
   31.24  	(hypercall_t)do_physdev_op,
   31.25  	(hypercall_t)do_hvm_op,			/*  */
   31.26 -	(hypercall_t)do_ni_hypercall,		/*  */                  /* 35 */
   31.27 -	(hypercall_t)do_ni_hypercall,		/*  */
   31.28 +	(hypercall_t)do_sysctl,			/*  */                  /* 35 */
   31.29 +	(hypercall_t)do_domctl,			/*  */
   31.30  	(hypercall_t)do_ni_hypercall,		/*  */
   31.31  	(hypercall_t)do_ni_hypercall,		/*  */
   31.32  	(hypercall_t)do_ni_hypercall,		/*  */
    32.1 --- a/xen/arch/powerpc/dom0_ops.c	Fri Aug 25 10:39:24 2006 +0100
    32.2 +++ b/xen/arch/powerpc/dom0_ops.c	Fri Aug 25 18:39:10 2006 +0100
    32.3 @@ -24,10 +24,8 @@
    32.4  #include <xen/sched.h>
    32.5  #include <xen/guest_access.h>
    32.6  #include <public/xen.h>
    32.7 -#include <public/dom0_ops.h>
    32.8 -
    32.9 -extern void arch_getdomaininfo_ctxt(struct vcpu *v, vcpu_guest_context_t *c);
   32.10 -extern long arch_do_dom0_op(struct dom0_op *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op);
   32.11 +#include <public/domctl.h>
   32.12 +#include <public/sysctl.h>
   32.13  
   32.14  void arch_getdomaininfo_ctxt(struct vcpu *v, vcpu_guest_context_t *c)
   32.15  { 
   32.16 @@ -35,16 +33,17 @@ void arch_getdomaininfo_ctxt(struct vcpu
   32.17      /* XXX fill in rest of vcpu_guest_context_t */
   32.18  }
   32.19  
   32.20 -long arch_do_dom0_op(struct dom0_op *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
   32.21 +long arch_do_domctl(struct xen_domctl *domctl,
   32.22 +                    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
   32.23  {
   32.24      long ret = 0;
   32.25  
   32.26 -    switch (op->cmd) {
   32.27 -    case DOM0_GETMEMLIST:
   32.28 +    switch (domctl->cmd) {
   32.29 +    case XEN_DOMCTL_getmemlist:
   32.30      {
   32.31          int i;
   32.32 -        struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
   32.33 -        unsigned long max_pfns = op->u.getmemlist.max_pfns;
   32.34 +        struct domain *d = find_domain_by_id(domctl->domain);
   32.35 +        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
   32.36          xen_pfn_t mfn;
   32.37          struct list_head *list_ent;
   32.38  
   32.39 @@ -59,7 +58,7 @@ long arch_do_dom0_op(struct dom0_op *op,
   32.40              {
   32.41                  mfn = page_to_mfn(list_entry(
   32.42                      list_ent, struct page_info, list));
   32.43 -                if ( copy_to_guest_offset(op->u.getmemlist.buffer,
   32.44 +                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
   32.45                                            i, &mfn, 1) )
   32.46                  {
   32.47                      ret = -EFAULT;
   32.48 @@ -69,17 +68,31 @@ long arch_do_dom0_op(struct dom0_op *op,
   32.49              }
   32.50              spin_unlock(&d->page_alloc_lock);
   32.51  
   32.52 -            op->u.getmemlist.num_pfns = i;
   32.53 -            copy_to_guest(u_dom0_op, op, 1);
   32.54 +            domctl->u.getmemlist.num_pfns = i;
   32.55 +            copy_to_guest(u_domctl, domctl, 1);
   32.56              
   32.57              put_domain(d);
   32.58          }
   32.59      }
   32.60      break;
   32.61  
   32.62 -    case DOM0_PHYSINFO:
   32.63 +    default:
   32.64 +        ret = -ENOSYS;
   32.65 +        break;
   32.66 +    }
   32.67 +
   32.68 +    return ret;
   32.69 +}
   32.70 +
   32.71 +long arch_do_sysctl(struct xen_sysctl *sysctl,
   32.72 +                    XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
   32.73 +{
   32.74 +    long ret = 0;
   32.75 +
   32.76 +    switch (sysctl->cmd) {
   32.77 +    case XEN_SYSCTL_physinfo:
   32.78      {
   32.79 -        dom0_physinfo_t *pi = &op->u.physinfo;
   32.80 +        xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
   32.81  
   32.82          pi->threads_per_core = 1;
   32.83          pi->cores_per_socket = 1;
   32.84 @@ -90,7 +103,7 @@ long arch_do_dom0_op(struct dom0_op *op,
   32.85          pi->cpu_khz          = cpu_khz;
   32.86          memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
   32.87          ret = 0;
   32.88 -        if ( copy_to_guest(u_dom0_op, op, 1) )
   32.89 +        if ( copy_to_guest(u_sysctl, sysctl, 1) )
   32.90              ret = -EFAULT;
   32.91      }
   32.92      break;
   32.93 @@ -102,3 +115,4 @@ long arch_do_dom0_op(struct dom0_op *op,
   32.94  
   32.95      return ret;
   32.96  }
   32.97 +
    33.1 --- a/xen/arch/powerpc/powerpc64/hypercall_table.S	Fri Aug 25 10:39:24 2006 +0100
    33.2 +++ b/xen/arch/powerpc/powerpc64/hypercall_table.S	Fri Aug 25 18:39:10 2006 +0100
    33.3 @@ -11,7 +11,7 @@
    33.4          .quad 0 /* do_set_callbacks */
    33.5          .quad 0 /* do_fpu_taskswitch */     /*  5 */
    33.6          .quad do_sched_op
    33.7 -        .quad do_dom0_op
    33.8 +        .quad 0 /* do_platform_op */
    33.9          .quad 0 /* do_set_debugreg */
   33.10          .quad 0 /* do_get_debugreg */
   33.11          .quad 0 /* do_update_descriptor */  /* 10 */
   33.12 @@ -38,46 +38,9 @@
   33.13          .quad 0 /* do_xenoprof_op */
   33.14          .quad do_event_channel_op
   33.15          .quad do_physdev_op
   33.16 +        .quad 0 /* do_hvm_op */
   33.17 +        .quad do_sysctl             /* 35 */
   33.18 +        .quad do_domctl
   33.19          .rept NR_hypercalls-((.-__hypercall_table)/8)
   33.20          .quad do_ni_hypercall
   33.21          .endr
   33.22 -
   33.23 -        .globl hypercall_args_table
   33.24 -hypercall_args_table:	
   33.25 -        .byte 1 /* do_set_trap_table    */  /*  0 */
   33.26 -        .byte 4 /* do_mmu_update        */
   33.27 -        .byte 2 /* do_set_gdt           */
   33.28 -        .byte 2 /* do_stack_switch      */
   33.29 -        .byte 4 /* do_set_callbacks     */
   33.30 -        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
   33.31 -        .byte 2 /* do_arch_sched_op     */
   33.32 -        .byte 1 /* do_dom0_op           */
   33.33 -        .byte 2 /* do_set_debugreg      */
   33.34 -        .byte 1 /* do_get_debugreg      */
   33.35 -        .byte 4 /* do_update_descriptor */  /* 10 */
   33.36 -        .byte 0 /* do_ni_hypercall      */
   33.37 -        .byte 2 /* do_memory_op         */
   33.38 -        .byte 2 /* do_multicall         */
   33.39 -        .byte 4 /* do_update_va_mapping */
   33.40 -        .byte 2 /* do_set_timer_op      */  /* 15 */
   33.41 -        .byte 1 /* do_event_channel_op  */
   33.42 -        .byte 2 /* do_xen_version       */
   33.43 -        .byte 3 /* do_console_io        */
   33.44 -        .byte 1 /* do_physdev_op        */
   33.45 -        .byte 3 /* do_grant_table_op    */  /* 20 */
   33.46 -        .byte 2 /* do_vm_assist         */
   33.47 -        .byte 5 /* do_update_va_mapping_otherdomain */
   33.48 -        .byte 0 /* do_switch_vm86       */
   33.49 -        .byte 2 /* do_boot_vcpu         */
   33.50 -        .byte 0 /* do_ni_hypercall      */  /* 25 */
   33.51 -        .byte 4 /* do_mmuext_op         */
   33.52 -        .byte 1 /* do_acm_op            */
   33.53 -        .byte 2 /* do_nmi_op            */
   33.54 -        .byte 2 /* do_arch_sched_op     */
   33.55 -        .byte 2 /* do_callback_op       */  /* 30 */
   33.56 -        .byte 2 /* do_xenoprof_op       */
   33.57 -        .byte 2 /* do_event_channel_op  */
   33.58 -        .byte 2 /* do_physdev_op        */
   33.59 -        .rept NR_hypercalls-(.-hypercall_args_table)
   33.60 -        .byte 0 /* do_ni_hypercall      */
   33.61 -        .endr
    34.1 --- a/xen/arch/x86/Makefile	Fri Aug 25 10:39:24 2006 +0100
    34.2 +++ b/xen/arch/x86/Makefile	Fri Aug 25 18:39:10 2006 +0100
    34.3 @@ -12,12 +12,13 @@ obj-y += bitops.o
    34.4  obj-y += compat.o
    34.5  obj-y += delay.o
    34.6  obj-y += dmi_scan.o
    34.7 -obj-y += dom0_ops.o
    34.8 +obj-y += domctl.o
    34.9  obj-y += domain.o
   34.10  obj-y += domain_build.o
   34.11  obj-y += e820.o
   34.12  obj-y += extable.o
   34.13  obj-y += flushtlb.o
   34.14 +obj-y += platform_hypercall.o
   34.15  obj-y += i387.o
   34.16  obj-y += i8259.o
   34.17  obj-y += io_apic.o
   34.18 @@ -33,6 +34,7 @@ obj-y += shutdown.o
   34.19  obj-y += smp.o
   34.20  obj-y += smpboot.o
   34.21  obj-y += string.o
   34.22 +obj-y += sysctl.o
   34.23  obj-y += time.o
   34.24  obj-y += trampoline.o
   34.25  obj-y += traps.o
    35.1 --- a/xen/arch/x86/dom0_ops.c	Fri Aug 25 10:39:24 2006 +0100
    35.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.3 @@ -1,486 +0,0 @@
    35.4 -/******************************************************************************
    35.5 - * Arch-specific dom0_ops.c
    35.6 - * 
    35.7 - * Process command requests from domain-0 guest OS.
    35.8 - * 
    35.9 - * Copyright (c) 2002, K A Fraser
   35.10 - */
   35.11 -
   35.12 -#include <xen/config.h>
   35.13 -#include <xen/types.h>
   35.14 -#include <xen/lib.h>
   35.15 -#include <xen/mm.h>
   35.16 -#include <xen/guest_access.h>
   35.17 -#include <public/dom0_ops.h>
   35.18 -#include <xen/sched.h>
   35.19 -#include <xen/event.h>
   35.20 -#include <xen/domain_page.h>
   35.21 -#include <asm/msr.h>
   35.22 -#include <xen/trace.h>
   35.23 -#include <xen/console.h>
   35.24 -#include <xen/iocap.h>
   35.25 -#include <asm/shadow.h>
   35.26 -#include <asm/irq.h>
   35.27 -#include <asm/hvm/hvm.h>
   35.28 -#include <asm/hvm/support.h>
   35.29 -#include <asm/processor.h>
   35.30 -#include <public/sched_ctl.h>
   35.31 -
   35.32 -#include <asm/mtrr.h>
   35.33 -#include "cpu/mtrr/mtrr.h"
   35.34 -
   35.35 -#define TRC_DOM0OP_ENTER_BASE  0x00020000
   35.36 -#define TRC_DOM0OP_LEAVE_BASE  0x00030000
   35.37 -
   35.38 -static int msr_cpu_mask;
   35.39 -static unsigned long msr_addr;
   35.40 -static unsigned long msr_lo;
   35.41 -static unsigned long msr_hi;
   35.42 -
   35.43 -static void write_msr_for(void *unused)
   35.44 -{
   35.45 -    if ( ((1 << smp_processor_id()) & msr_cpu_mask) )
   35.46 -        (void)wrmsr_safe(msr_addr, msr_lo, msr_hi);
   35.47 -}
   35.48 -
   35.49 -static void read_msr_for(void *unused)
   35.50 -{
   35.51 -    if ( ((1 << smp_processor_id()) & msr_cpu_mask) )
   35.52 -        (void)rdmsr_safe(msr_addr, msr_lo, msr_hi);
   35.53 -}
   35.54 -
   35.55 -long arch_do_dom0_op(struct dom0_op *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
   35.56 -{
   35.57 -    long ret = 0;
   35.58 -
   35.59 -    switch ( op->cmd )
   35.60 -    {
   35.61 -
   35.62 -    case DOM0_MSR:
   35.63 -    {
   35.64 -        if ( op->u.msr.write )
   35.65 -        {
   35.66 -            msr_cpu_mask = op->u.msr.cpu_mask;
   35.67 -            msr_addr = op->u.msr.msr;
   35.68 -            msr_lo = op->u.msr.in1;
   35.69 -            msr_hi = op->u.msr.in2;
   35.70 -            smp_call_function(write_msr_for, NULL, 1, 1);
   35.71 -            write_msr_for(NULL);
   35.72 -        }
   35.73 -        else
   35.74 -        {
   35.75 -            msr_cpu_mask = op->u.msr.cpu_mask;
   35.76 -            msr_addr = op->u.msr.msr;
   35.77 -            smp_call_function(read_msr_for, NULL, 1, 1);
   35.78 -            read_msr_for(NULL);
   35.79 -
   35.80 -            op->u.msr.out1 = msr_lo;
   35.81 -            op->u.msr.out2 = msr_hi;
   35.82 -            copy_to_guest(u_dom0_op, op, 1);
   35.83 -        }
   35.84 -        ret = 0;
   35.85 -    }
   35.86 -    break;
   35.87 -
   35.88 -    case DOM0_SHADOW_CONTROL:
   35.89 -    {
   35.90 -        struct domain *d;
   35.91 -        ret = -ESRCH;
   35.92 -        d = find_domain_by_id(op->u.shadow_control.domain);
   35.93 -        if ( d != NULL )
   35.94 -        {
   35.95 -            ret = shadow2_control_op(d, &op->u.shadow_control, u_dom0_op);
   35.96 -            put_domain(d);
   35.97 -            copy_to_guest(u_dom0_op, op, 1);
   35.98 -        } 
   35.99 -    }
  35.100 -    break;
  35.101 -
  35.102 -    case DOM0_ADD_MEMTYPE:
  35.103 -    {
  35.104 -        ret = mtrr_add_page(
  35.105 -            op->u.add_memtype.mfn,
  35.106 -            op->u.add_memtype.nr_mfns,
  35.107 -            op->u.add_memtype.type,
  35.108 -            1);
  35.109 -        if ( ret > 0 )
  35.110 -        {
  35.111 -            op->u.add_memtype.handle = 0;
  35.112 -            op->u.add_memtype.reg    = ret;
  35.113 -            (void)copy_to_guest(u_dom0_op, op, 1);
  35.114 -            ret = 0;
  35.115 -        }
  35.116 -    }
  35.117 -    break;
  35.118 -
  35.119 -    case DOM0_DEL_MEMTYPE:
  35.120 -    {
  35.121 -        if (op->u.del_memtype.handle == 0
  35.122 -            /* mtrr/main.c otherwise does a lookup */
  35.123 -            && (int)op->u.del_memtype.reg >= 0)
  35.124 -        {
  35.125 -            ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
  35.126 -            if (ret > 0)
  35.127 -                ret = 0;
  35.128 -        }
  35.129 -        else
  35.130 -            ret = -EINVAL;
  35.131 -    }
  35.132 -    break;
  35.133 -
  35.134 -    case DOM0_READ_MEMTYPE:
  35.135 -    {
  35.136 -        unsigned long mfn;
  35.137 -        unsigned int  nr_mfns;
  35.138 -        mtrr_type     type;
  35.139 -
  35.140 -        ret = -EINVAL;
  35.141 -        if ( op->u.read_memtype.reg < num_var_ranges )
  35.142 -        {
  35.143 -            mtrr_if->get(op->u.read_memtype.reg, &mfn, &nr_mfns, &type);
  35.144 -            op->u.read_memtype.mfn     = mfn;
  35.145 -            op->u.read_memtype.nr_mfns = nr_mfns;
  35.146 -            op->u.read_memtype.type    = type;
  35.147 -            (void)copy_to_guest(u_dom0_op, op, 1);
  35.148 -            ret = 0;
  35.149 -        }
  35.150 -    }
  35.151 -    break;
  35.152 -
  35.153 -    case DOM0_MICROCODE:
  35.154 -    {
  35.155 -        extern int microcode_update(void *buf, unsigned long len);
  35.156 -        ret = microcode_update(op->u.microcode.data.p, op->u.microcode.length);
  35.157 -    }
  35.158 -    break;
  35.159 -
  35.160 -    case DOM0_IOPORT_PERMISSION:
  35.161 -    {
  35.162 -        struct domain *d;
  35.163 -        unsigned int fp = op->u.ioport_permission.first_port;
  35.164 -        unsigned int np = op->u.ioport_permission.nr_ports;
  35.165 -
  35.166 -        ret = -EINVAL;
  35.167 -        if ( (fp + np) > 65536 )
  35.168 -            break;
  35.169 -
  35.170 -        ret = -ESRCH;
  35.171 -        if ( unlikely((d = find_domain_by_id(
  35.172 -            op->u.ioport_permission.domain)) == NULL) )
  35.173 -            break;
  35.174 -
  35.175 -        if ( np == 0 )
  35.176 -            ret = 0;
  35.177 -        else if ( op->u.ioport_permission.allow_access )
  35.178 -            ret = ioports_permit_access(d, fp, fp + np - 1);
  35.179 -        else
  35.180 -            ret = ioports_deny_access(d, fp, fp + np - 1);
  35.181 -
  35.182 -        put_domain(d);
  35.183 -    }
  35.184 -    break;
  35.185 -
  35.186 -    case DOM0_PHYSINFO:
  35.187 -    {
  35.188 -        dom0_physinfo_t *pi = &op->u.physinfo;
  35.189 -
  35.190 -        pi->threads_per_core =
  35.191 -            cpus_weight(cpu_sibling_map[0]);
  35.192 -        pi->cores_per_socket =
  35.193 -            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
  35.194 -        pi->sockets_per_node = 
  35.195 -            num_online_cpus() / cpus_weight(cpu_core_map[0]);
  35.196 -
  35.197 -        pi->nr_nodes         = 1;
  35.198 -        pi->total_pages      = total_pages;
  35.199 -        pi->free_pages       = avail_domheap_pages();
  35.200 -        pi->scrub_pages      = avail_scrub_pages();
  35.201 -        pi->cpu_khz          = cpu_khz;
  35.202 -        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
  35.203 -        memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
  35.204 -        ret = 0;
  35.205 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  35.206 -            ret = -EFAULT;
  35.207 -    }
  35.208 -    break;
  35.209 -    
  35.210 -    case DOM0_GETPAGEFRAMEINFO:
  35.211 -    {
  35.212 -        struct page_info *page;
  35.213 -        unsigned long mfn = op->u.getpageframeinfo.gmfn;
  35.214 -        domid_t dom = op->u.getpageframeinfo.domain;
  35.215 -        struct domain *d;
  35.216 -
  35.217 -        ret = -EINVAL;
  35.218 -
  35.219 -        if ( unlikely(!mfn_valid(mfn)) ||
  35.220 -             unlikely((d = find_domain_by_id(dom)) == NULL) )
  35.221 -            break;
  35.222 -
  35.223 -        page = mfn_to_page(mfn);
  35.224 -
  35.225 -        if ( likely(get_page(page, d)) )
  35.226 -        {
  35.227 -            ret = 0;
  35.228 -
  35.229 -            op->u.getpageframeinfo.type = NOTAB;
  35.230 -
  35.231 -            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
  35.232 -            {
  35.233 -                switch ( page->u.inuse.type_info & PGT_type_mask )
  35.234 -                {
  35.235 -                case PGT_l1_page_table:
  35.236 -                    op->u.getpageframeinfo.type = L1TAB;
  35.237 -                    break;
  35.238 -                case PGT_l2_page_table:
  35.239 -                    op->u.getpageframeinfo.type = L2TAB;
  35.240 -                    break;
  35.241 -                case PGT_l3_page_table:
  35.242 -                    op->u.getpageframeinfo.type = L3TAB;
  35.243 -                    break;
  35.244 -                case PGT_l4_page_table:
  35.245 -                    op->u.getpageframeinfo.type = L4TAB;
  35.246 -                    break;
  35.247 -                }
  35.248 -            }
  35.249 -            
  35.250 -            put_page(page);
  35.251 -        }
  35.252 -
  35.253 -        put_domain(d);
  35.254 -
  35.255 -        copy_to_guest(u_dom0_op, op, 1);
  35.256 -    }
  35.257 -    break;
  35.258 -
  35.259 -    case DOM0_GETPAGEFRAMEINFO2:
  35.260 -    {
  35.261 -#define GPF2_BATCH (PAGE_SIZE / sizeof(unsigned long)) 
  35.262 -        int n,j;
  35.263 -        int num = op->u.getpageframeinfo2.num;
  35.264 -        domid_t dom = op->u.getpageframeinfo2.domain;
  35.265 -        struct domain *d;
  35.266 -        unsigned long *l_arr;
  35.267 -        ret = -ESRCH;
  35.268 -
  35.269 -        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
  35.270 -            break;
  35.271 -
  35.272 -        if ( unlikely(num > 1024) )
  35.273 -        {
  35.274 -            ret = -E2BIG;
  35.275 -            put_domain(d);
  35.276 -            break;
  35.277 -        }
  35.278 -
  35.279 -        l_arr = alloc_xenheap_page();
  35.280 - 
  35.281 -        ret = 0;
  35.282 -        for( n = 0; n < num; )
  35.283 -        {
  35.284 -            int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
  35.285 -
  35.286 -            if ( copy_from_guest_offset(l_arr, op->u.getpageframeinfo2.array,
  35.287 -                                        n, k) )
  35.288 -            {
  35.289 -                ret = -EINVAL;
  35.290 -                break;
  35.291 -            }
  35.292 -     
  35.293 -            for( j = 0; j < k; j++ )
  35.294 -            {      
  35.295 -                struct page_info *page;
  35.296 -                unsigned long mfn = l_arr[j];
  35.297 -
  35.298 -                page = mfn_to_page(mfn);
  35.299 -
  35.300 -                if ( likely(mfn_valid(mfn) && get_page(page, d)) ) 
  35.301 -                {
  35.302 -                    unsigned long type = 0;
  35.303 -
  35.304 -                    switch( page->u.inuse.type_info & PGT_type_mask )
  35.305 -                    {
  35.306 -                    case PGT_l1_page_table:
  35.307 -                        type = L1TAB;
  35.308 -                        break;
  35.309 -                    case PGT_l2_page_table:
  35.310 -                        type = L2TAB;
  35.311 -                        break;
  35.312 -                    case PGT_l3_page_table:
  35.313 -                        type = L3TAB;
  35.314 -                        break;
  35.315 -                    case PGT_l4_page_table:
  35.316 -                        type = L4TAB;
  35.317 -                        break;
  35.318 -                    }
  35.319 -
  35.320 -                    if ( page->u.inuse.type_info & PGT_pinned )
  35.321 -                        type |= LPINTAB;
  35.322 -                    l_arr[j] |= type;
  35.323 -                    put_page(page);
  35.324 -                }
  35.325 -                else
  35.326 -                    l_arr[j] |= XTAB;
  35.327 -
  35.328 -            }
  35.329 -
  35.330 -            if ( copy_to_guest_offset(op->u.getpageframeinfo2.array,
  35.331 -                                      n, l_arr, k) )
  35.332 -            {
  35.333 -                ret = -EINVAL;
  35.334 -                break;
  35.335 -            }
  35.336 -
  35.337 -            n += k;
  35.338 -        }
  35.339 -
  35.340 -        free_xenheap_page(l_arr);
  35.341 -
  35.342 -        put_domain(d);
  35.343 -    }
  35.344 -    break;
  35.345 -
  35.346 -    case DOM0_GETMEMLIST:
  35.347 -    {
  35.348 -        int i;
  35.349 -        struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
  35.350 -        unsigned long max_pfns = op->u.getmemlist.max_pfns;
  35.351 -        unsigned long mfn;
  35.352 -        struct list_head *list_ent;
  35.353 -
  35.354 -        ret = -EINVAL;
  35.355 -        if ( d != NULL )
  35.356 -        {
  35.357 -            ret = 0;
  35.358 -
  35.359 -            spin_lock(&d->page_alloc_lock);
  35.360 -            list_ent = d->page_list.next;
  35.361 -            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
  35.362 -            {
  35.363 -                mfn = page_to_mfn(list_entry(
  35.364 -                    list_ent, struct page_info, list));
  35.365 -                if ( copy_to_guest_offset(op->u.getmemlist.buffer,
  35.366 -                                          i, &mfn, 1) )
  35.367 -                {
  35.368 -                    ret = -EFAULT;
  35.369 -                    break;
  35.370 -                }
  35.371 -                list_ent = mfn_to_page(mfn)->list.next;
  35.372 -            }
  35.373 -            spin_unlock(&d->page_alloc_lock);
  35.374 -
  35.375 -            op->u.getmemlist.num_pfns = i;
  35.376 -            copy_to_guest(u_dom0_op, op, 1);
  35.377 -            
  35.378 -            put_domain(d);
  35.379 -        }
  35.380 -    }
  35.381 -    break;
  35.382 -
  35.383 -    case DOM0_PLATFORM_QUIRK:
  35.384 -    {
  35.385 -        extern int opt_noirqbalance;
  35.386 -        int quirk_id = op->u.platform_quirk.quirk_id;
  35.387 -        switch ( quirk_id )
  35.388 -        {
  35.389 -        case QUIRK_NOIRQBALANCING:
  35.390 -            printk("Platform quirk -- Disabling IRQ balancing/affinity.\n");
  35.391 -            opt_noirqbalance = 1;
  35.392 -            setup_ioapic_dest();
  35.393 -            break;
  35.394 -        case QUIRK_IOAPIC_BAD_REGSEL:
  35.395 -        case QUIRK_IOAPIC_GOOD_REGSEL:
  35.396 -#ifndef sis_apic_bug
  35.397 -            sis_apic_bug = (quirk_id == QUIRK_IOAPIC_BAD_REGSEL);
  35.398 -            DPRINTK("Domain 0 says that IO-APIC REGSEL is %s\n",
  35.399 -                    sis_apic_bug ? "bad" : "good");
  35.400 -#else
  35.401 -            BUG_ON(sis_apic_bug != (quirk_id == QUIRK_IOAPIC_BAD_REGSEL));
  35.402 -#endif
  35.403 -            break;
  35.404 -        default:
  35.405 -            ret = -EINVAL;
  35.406 -            break;
  35.407 -        }
  35.408 -    }
  35.409 -    break;
  35.410 -
  35.411 -    case DOM0_HYPERCALL_INIT:
  35.412 -    {
  35.413 -        struct domain *d = find_domain_by_id(op->u.hypercall_init.domain);
  35.414 -        unsigned long gmfn = op->u.hypercall_init.gmfn;
  35.415 -        unsigned long mfn;
  35.416 -        void *hypercall_page;
  35.417 -
  35.418 -        ret = -ESRCH;
  35.419 -        if ( unlikely(d == NULL) )
  35.420 -            break;
  35.421 -
  35.422 -        mfn = gmfn_to_mfn(d, gmfn);
  35.423 -
  35.424 -        ret = -EACCES;
  35.425 -        if ( !mfn_valid(mfn) ||
  35.426 -             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
  35.427 -        {
  35.428 -            put_domain(d);
  35.429 -            break;
  35.430 -        }
  35.431 -
  35.432 -        ret = 0;
  35.433 -
  35.434 -        hypercall_page = map_domain_page(mfn);
  35.435 -        hypercall_page_initialise(d, hypercall_page);
  35.436 -        unmap_domain_page(hypercall_page);
  35.437 -
  35.438 -        put_page_and_type(mfn_to_page(mfn));
  35.439 -
  35.440 -        put_domain(d);
  35.441 -    }
  35.442 -    break;
  35.443 -
  35.444 -    default:
  35.445 -        ret = -ENOSYS;
  35.446 -        break;
  35.447 -    }
  35.448 -
  35.449 -    return ret;
  35.450 -}
  35.451 -
  35.452 -void arch_getdomaininfo_ctxt(
  35.453 -    struct vcpu *v, struct vcpu_guest_context *c)
  35.454 -{
  35.455 -    memcpy(c, &v->arch.guest_context, sizeof(*c));
  35.456 -
  35.457 -    if ( hvm_guest(v) )
  35.458 -    {
  35.459 -        hvm_store_cpu_guest_regs(v, &c->user_regs, c->ctrlreg);
  35.460 -    }
  35.461 -    else
  35.462 -    {
  35.463 -        /* IOPL privileges are virtualised: merge back into returned eflags. */
  35.464 -        BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
  35.465 -        c->user_regs.eflags |= v->arch.iopl << 12;
  35.466 -    }
  35.467 -
  35.468 -    c->flags = 0;
  35.469 -    if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
  35.470 -        c->flags |= VGCF_I387_VALID;
  35.471 -    if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
  35.472 -        c->flags |= VGCF_IN_KERNEL;
  35.473 -    if ( hvm_guest(v) )
  35.474 -        c->flags |= VGCF_HVM_GUEST;
  35.475 -
  35.476 -    c->ctrlreg[3] = xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table));
  35.477 -
  35.478 -    c->vm_assist = v->domain->vm_assist;
  35.479 -}
  35.480 -
  35.481 -/*
  35.482 - * Local variables:
  35.483 - * mode: C
  35.484 - * c-set-style: "BSD"
  35.485 - * c-basic-offset: 4
  35.486 - * tab-width: 4
  35.487 - * indent-tabs-mode: nil
  35.488 - * End:
  35.489 - */
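With the arch dom0_ops.c removed, its command handlers reappear almost verbatim under the three new hypercalls added elsewhere in this changeset. A rough map of where each removed case lands, derived from the other hunks here (DOM0_MSR has no visible replacement in this changeset):

    /*
     * DOM0_SHADOW_CONTROL     -> XEN_DOMCTL_shadow_op          (domctl.c)
     * DOM0_IOPORT_PERMISSION  -> XEN_DOMCTL_ioport_permission  (domctl.c)
     * DOM0_GETPAGEFRAMEINFO   -> XEN_DOMCTL_getpageframeinfo   (domctl.c)
     * DOM0_GETPAGEFRAMEINFO2  -> XEN_DOMCTL_getpageframeinfo2  (domctl.c)
     * DOM0_GETMEMLIST         -> XEN_DOMCTL_getmemlist         (domctl.c)
     * DOM0_HYPERCALL_INIT     -> XEN_DOMCTL_hypercall_init     (domctl.c)
     * DOM0_PHYSINFO           -> XEN_SYSCTL_physinfo           (sysctl.c)
     * DOM0_ADD_MEMTYPE        -> XENPF_add_memtype     (platform_hypercall.c)
     * DOM0_DEL_MEMTYPE        -> XENPF_del_memtype     (platform_hypercall.c)
     * DOM0_READ_MEMTYPE       -> XENPF_read_memtype    (platform_hypercall.c)
     * DOM0_MICROCODE          -> XENPF_microcode_update (platform_hypercall.c)
     * DOM0_PLATFORM_QUIRK     -> XENPF_platform_quirk  (platform_hypercall.c)
     */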
    36.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    36.2 +++ b/xen/arch/x86/domctl.c	Fri Aug 25 18:39:10 2006 +0100
    36.3 @@ -0,0 +1,326 @@
    36.4 +/******************************************************************************
    36.5 + * Arch-specific domctl.c
    36.6 + * 
    36.7 + * Copyright (c) 2002-2006, K A Fraser
    36.8 + */
    36.9 +
   36.10 +#include <xen/config.h>
   36.11 +#include <xen/types.h>
   36.12 +#include <xen/lib.h>
   36.13 +#include <xen/mm.h>
   36.14 +#include <xen/guest_access.h>
   36.15 +#include <public/domctl.h>
   36.16 +#include <xen/sched.h>
   36.17 +#include <xen/event.h>
   36.18 +#include <xen/domain_page.h>
   36.19 +#include <asm/msr.h>
   36.20 +#include <xen/trace.h>
   36.21 +#include <xen/console.h>
   36.22 +#include <xen/iocap.h>
   36.23 +#include <asm/shadow.h>
   36.24 +#include <asm/irq.h>
   36.25 +#include <asm/hvm/hvm.h>
   36.26 +#include <asm/hvm/support.h>
   36.27 +#include <asm/processor.h>
   36.28 +
   36.29 +long arch_do_domctl(
   36.30 +    struct xen_domctl *domctl,
   36.31 +    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
   36.32 +{
   36.33 +    long ret = 0;
   36.34 +
   36.35 +    switch ( domctl->cmd )
   36.36 +    {
   36.37 +
   36.38 +    case XEN_DOMCTL_shadow_op:
   36.39 +    {
   36.40 +        struct domain *d;
   36.41 +        ret = -ESRCH;
   36.42 +        d = find_domain_by_id(domctl->domain);
   36.43 +        if ( d != NULL )
   36.44 +        {
   36.45 +            ret = shadow2_domctl(d, &domctl->u.shadow_op, u_domctl);
   36.46 +            put_domain(d);
   36.47 +            copy_to_guest(u_domctl, domctl, 1);
   36.48 +        } 
   36.49 +    }
   36.50 +    break;
   36.51 +
   36.52 +    case XEN_DOMCTL_ioport_permission:
   36.53 +    {
   36.54 +        struct domain *d;
   36.55 +        unsigned int fp = domctl->u.ioport_permission.first_port;
   36.56 +        unsigned int np = domctl->u.ioport_permission.nr_ports;
   36.57 +
   36.58 +        ret = -EINVAL;
   36.59 +        if ( (fp + np) > 65536 )
   36.60 +            break;
   36.61 +
   36.62 +        ret = -ESRCH;
   36.63 +        if ( unlikely((d = find_domain_by_id(domctl->domain)) == NULL) )
   36.64 +            break;
   36.65 +
   36.66 +        if ( np == 0 )
   36.67 +            ret = 0;
   36.68 +        else if ( domctl->u.ioport_permission.allow_access )
   36.69 +            ret = ioports_permit_access(d, fp, fp + np - 1);
   36.70 +        else
   36.71 +            ret = ioports_deny_access(d, fp, fp + np - 1);
   36.72 +
   36.73 +        put_domain(d);
   36.74 +    }
   36.75 +    break;
   36.76 +
   36.77 +    case XEN_DOMCTL_getpageframeinfo:
   36.78 +    {
   36.79 +        struct page_info *page;
   36.80 +        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
   36.81 +        domid_t dom = domctl->domain;
   36.82 +        struct domain *d;
   36.83 +
   36.84 +        ret = -EINVAL;
   36.85 +
   36.86 +        if ( unlikely(!mfn_valid(mfn)) ||
   36.87 +             unlikely((d = find_domain_by_id(dom)) == NULL) )
   36.88 +            break;
   36.89 +
   36.90 +        page = mfn_to_page(mfn);
   36.91 +
   36.92 +        if ( likely(get_page(page, d)) )
   36.93 +        {
   36.94 +            ret = 0;
   36.95 +
   36.96 +            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;
   36.97 +
   36.98 +            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
   36.99 +            {
  36.100 +                switch ( page->u.inuse.type_info & PGT_type_mask )
  36.101 +                {
  36.102 +                case PGT_l1_page_table:
  36.103 +                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
  36.104 +                    break;
  36.105 +                case PGT_l2_page_table:
  36.106 +                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
  36.107 +                    break;
  36.108 +                case PGT_l3_page_table:
  36.109 +                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
  36.110 +                    break;
  36.111 +                case PGT_l4_page_table:
  36.112 +                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
  36.113 +                    break;
  36.114 +                }
  36.115 +            }
  36.116 +            
  36.117 +            put_page(page);
  36.118 +        }
  36.119 +
  36.120 +        put_domain(d);
  36.121 +
  36.122 +        copy_to_guest(u_domctl, domctl, 1);
  36.123 +    }
  36.124 +    break;
  36.125 +
  36.126 +    case XEN_DOMCTL_getpageframeinfo2:
  36.127 +    {
  36.128 +#define GPF2_BATCH (PAGE_SIZE / sizeof(long))
  36.129 +        int n,j;
  36.130 +        int num = domctl->u.getpageframeinfo2.num;
  36.131 +        domid_t dom = domctl->domain;
  36.132 +        struct domain *d;
  36.133 +        unsigned long *l_arr;
  36.134 +        ret = -ESRCH;
  36.135 +
  36.136 +        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
  36.137 +            break;
  36.138 +
  36.139 +        if ( unlikely(num > 1024) )
  36.140 +        {
  36.141 +            ret = -E2BIG;
  36.142 +            put_domain(d);
  36.143 +            break;
  36.144 +        }
  36.145 +
  36.146 +        l_arr = alloc_xenheap_page();
  36.147 + 
  36.148 +        ret = 0;
  36.149 +        for ( n = 0; n < num; )
  36.150 +        {
  36.151 +            int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
  36.152 +
  36.153 +            if ( copy_from_guest_offset(l_arr,
  36.154 +                                        domctl->u.getpageframeinfo2.array,
  36.155 +                                        n, k) )
  36.156 +            {
  36.157 +                ret = -EINVAL;
  36.158 +                break;
  36.159 +            }
  36.160 +     
  36.161 +            for ( j = 0; j < k; j++ )
  36.162 +            {      
  36.163 +                struct page_info *page;
  36.164 +                unsigned long mfn = l_arr[j];
  36.165 +
  36.166 +                page = mfn_to_page(mfn);
  36.167 +
  36.168 +                if ( likely(mfn_valid(mfn) && get_page(page, d)) ) 
  36.169 +                {
  36.170 +                    unsigned long type = 0;
  36.171 +
  36.172 +                    switch( page->u.inuse.type_info & PGT_type_mask )
  36.173 +                    {
  36.174 +                    case PGT_l1_page_table:
  36.175 +                        type = XEN_DOMCTL_PFINFO_L1TAB;
  36.176 +                        break;
  36.177 +                    case PGT_l2_page_table:
  36.178 +                        type = XEN_DOMCTL_PFINFO_L2TAB;
  36.179 +                        break;
  36.180 +                    case PGT_l3_page_table:
  36.181 +                        type = XEN_DOMCTL_PFINFO_L3TAB;
  36.182 +                        break;
  36.183 +                    case PGT_l4_page_table:
  36.184 +                        type = XEN_DOMCTL_PFINFO_L4TAB;
  36.185 +                        break;
  36.186 +                    }
  36.187 +
  36.188 +                    if ( page->u.inuse.type_info & PGT_pinned )
  36.189 +                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
  36.190 +                    l_arr[j] |= type;
  36.191 +                    put_page(page);
  36.192 +                }
  36.193 +                else
  36.194 +                    l_arr[j] |= XEN_DOMCTL_PFINFO_XTAB;
  36.195 +
  36.196 +            }
  36.197 +
  36.198 +            if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
  36.199 +                                      n, l_arr, k) )
  36.200 +            {
  36.201 +                ret = -EINVAL;
  36.202 +                break;
  36.203 +            }
  36.204 +
  36.205 +            n += k;
  36.206 +        }
  36.207 +
  36.208 +        free_xenheap_page(l_arr);
  36.209 +
  36.210 +        put_domain(d);
  36.211 +    }
  36.212 +    break;
  36.213 +
  36.214 +    case XEN_DOMCTL_getmemlist:
  36.215 +    {
  36.216 +        int i;
  36.217 +        struct domain *d = find_domain_by_id(domctl->domain);
  36.218 +        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
  36.219 +        unsigned long mfn;
  36.220 +        struct list_head *list_ent;
  36.221 +
  36.222 +        ret = -EINVAL;
  36.223 +        if ( d != NULL )
  36.224 +        {
  36.225 +            ret = 0;
  36.226 +
  36.227 +            spin_lock(&d->page_alloc_lock);
  36.228 +            list_ent = d->page_list.next;
  36.229 +            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
  36.230 +            {
  36.231 +                mfn = page_to_mfn(list_entry(
  36.232 +                    list_ent, struct page_info, list));
  36.233 +                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
  36.234 +                                          i, &mfn, 1) )
  36.235 +                {
  36.236 +                    ret = -EFAULT;
  36.237 +                    break;
  36.238 +                }
  36.239 +                list_ent = mfn_to_page(mfn)->list.next;
  36.240 +            }
  36.241 +            spin_unlock(&d->page_alloc_lock);
  36.242 +
  36.243 +            domctl->u.getmemlist.num_pfns = i;
  36.244 +            copy_to_guest(u_domctl, domctl, 1);
  36.245 +
  36.246 +            put_domain(d);
  36.247 +        }
  36.248 +    }
  36.249 +    break;
  36.250 +
  36.251 +    case XEN_DOMCTL_hypercall_init:
  36.252 +    {
  36.253 +        struct domain *d = find_domain_by_id(domctl->domain);
  36.254 +        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
  36.255 +        unsigned long mfn;
  36.256 +        void *hypercall_page;
  36.257 +
  36.258 +        ret = -ESRCH;
  36.259 +        if ( unlikely(d == NULL) )
  36.260 +            break;
  36.261 +
  36.262 +        mfn = gmfn_to_mfn(d, gmfn);
  36.263 +
  36.264 +        ret = -EACCES;
  36.265 +        if ( !mfn_valid(mfn) ||
  36.266 +             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
  36.267 +        {
  36.268 +            put_domain(d);
  36.269 +            break;
  36.270 +        }
  36.271 +
  36.272 +        ret = 0;
  36.273 +
  36.274 +        hypercall_page = map_domain_page(mfn);
  36.275 +        hypercall_page_initialise(d, hypercall_page);
  36.276 +        unmap_domain_page(hypercall_page);
  36.277 +
  36.278 +        put_page_and_type(mfn_to_page(mfn));
  36.279 +
  36.280 +        put_domain(d);
  36.281 +    }
  36.282 +    break;
  36.283 +
  36.284 +    default:
  36.285 +        ret = -ENOSYS;
  36.286 +        break;
  36.287 +    }
  36.288 +
  36.289 +    return ret;
  36.290 +}
  36.291 +
  36.292 +void arch_getdomaininfo_ctxt(
  36.293 +    struct vcpu *v, struct vcpu_guest_context *c)
  36.294 +{
  36.295 +    memcpy(c, &v->arch.guest_context, sizeof(*c));
  36.296 +
  36.297 +    if ( hvm_guest(v) )
  36.298 +    {
  36.299 +        hvm_store_cpu_guest_regs(v, &c->user_regs, c->ctrlreg);
  36.300 +    }
  36.301 +    else
  36.302 +    {
  36.303 +        /* IOPL privileges are virtualised: merge back into returned eflags. */
  36.304 +        BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
  36.305 +        c->user_regs.eflags |= v->arch.iopl << 12;
  36.306 +    }
  36.307 +
  36.308 +    c->flags = 0;
  36.309 +    if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
  36.310 +        c->flags |= VGCF_I387_VALID;
  36.311 +    if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
  36.312 +        c->flags |= VGCF_IN_KERNEL;
  36.313 +    if ( hvm_guest(v) )
  36.314 +        c->flags |= VGCF_HVM_GUEST;
  36.315 +
  36.316 +    c->ctrlreg[3] = xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table));
  36.317 +
  36.318 +    c->vm_assist = v->domain->vm_assist;
  36.319 +}
  36.320 +
  36.321 +/*
  36.322 + * Local variables:
  36.323 + * mode: C
  36.324 + * c-set-style: "BSD"
  36.325 + * c-basic-offset: 4
  36.326 + * tab-width: 4
  36.327 + * indent-tabs-mode: nil
  36.328 + * End:
  36.329 + */
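For orientation, a hypothetical management-tool invocation of the new getmemlist domctl might look as follows. The do_domctl() privcmd wrapper and set_xen_guest_handle() helper are assumed from libxc conventions rather than taken from this hunk, and the wrapper is assumed to fill in the interface version; the field names and the unsigned long buffer element type come from the handler above.

    /* Hypothetical libxc-style helper -- a sketch, not this changeset's API. */
    static int get_mem_list(int xc_handle, uint32_t domid,
                            unsigned long max_pfns, unsigned long *pfn_buf)
    {
        struct xen_domctl domctl;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd                   = XEN_DOMCTL_getmemlist;
        domctl.domain                = (domid_t)domid;
        domctl.u.getmemlist.max_pfns = max_pfns;
        set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);

        if ( do_domctl(xc_handle, &domctl) != 0 )
            return -1;

        return domctl.u.getmemlist.num_pfns;  /* filled in by Xen */
    }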
    37.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    37.2 +++ b/xen/arch/x86/platform_hypercall.c	Fri Aug 25 18:39:10 2006 +0100
    37.3 @@ -0,0 +1,159 @@
    37.4 +/******************************************************************************
    37.5 + * platform_hypercall.c
    37.6 + * 
    37.7 + * Hardware platform operations. Intended for use by domain-0 kernel.
    37.8 + * 
    37.9 + * Copyright (c) 2002-2006, K Fraser
   37.10 + */
   37.11 +
   37.12 +#include <xen/config.h>
   37.13 +#include <xen/types.h>
   37.14 +#include <xen/lib.h>
   37.15 +#include <xen/mm.h>
   37.16 +#include <xen/sched.h>
   37.17 +#include <xen/domain.h>
   37.18 +#include <xen/event.h>
   37.19 +#include <xen/domain_page.h>
   37.20 +#include <xen/trace.h>
   37.21 +#include <xen/console.h>
   37.22 +#include <xen/iocap.h>
   37.23 +#include <xen/guest_access.h>
   37.24 +#include <asm/current.h>
   37.25 +#include <public/platform.h>
   37.26 +#include <asm/mtrr.h>
   37.27 +#include "cpu/mtrr/mtrr.h"
   37.28 +
   37.29 +long do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
   37.30 +{
   37.31 +    long ret = 0;
   37.32 +    struct xen_platform_op curop, *op = &curop;
   37.33 +    static DEFINE_SPINLOCK(xenpf_lock);
   37.34 +
   37.35 +    if ( !IS_PRIV(current->domain) )
   37.36 +        return -EPERM;
   37.37 +
   37.38 +    if ( copy_from_guest(op, u_xenpf_op, 1) )
   37.39 +        return -EFAULT;
   37.40 +
   37.41 +    if ( op->interface_version != XENPF_INTERFACE_VERSION )
   37.42 +        return -EACCES;
   37.43 +
   37.44 +    spin_lock(&xenpf_lock);
   37.45 +
   37.46 +    switch ( op->cmd )
   37.47 +    {
   37.48 +    case XENPF_settime:
   37.49 +    {
   37.50 +        do_settime(op->u.settime.secs, 
   37.51 +                   op->u.settime.nsecs, 
   37.52 +                   op->u.settime.system_time);
   37.53 +        ret = 0;
   37.54 +    }
   37.55 +    break;
   37.56 +
   37.57 +    case XENPF_add_memtype:
   37.58 +    {
   37.59 +        ret = mtrr_add_page(
   37.60 +            op->u.add_memtype.mfn,
   37.61 +            op->u.add_memtype.nr_mfns,
   37.62 +            op->u.add_memtype.type,
   37.63 +            1);
   37.64 +        if ( ret > 0 )
   37.65 +        {
   37.66 +            op->u.add_memtype.handle = 0;
   37.67 +            op->u.add_memtype.reg    = ret;
   37.68 +            (void)copy_to_guest(u_xenpf_op, op, 1);
   37.69 +            ret = 0;
   37.70 +        }
   37.71 +    }
   37.72 +    break;
   37.73 +
   37.74 +    case XENPF_del_memtype:
   37.75 +    {
   37.76 +        if (op->u.del_memtype.handle == 0
   37.77 +            /* mtrr/main.c otherwise does a lookup */
   37.78 +            && (int)op->u.del_memtype.reg >= 0)
   37.79 +        {
   37.80 +            ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
   37.81 +            if (ret > 0)
   37.82 +                ret = 0;
   37.83 +        }
   37.84 +        else
   37.85 +            ret = -EINVAL;
   37.86 +    }
   37.87 +    break;
   37.88 +
   37.89 +    case XENPF_read_memtype:
   37.90 +    {
   37.91 +        unsigned long mfn;
   37.92 +        unsigned int  nr_mfns;
   37.93 +        mtrr_type     type;
   37.94 +
   37.95 +        ret = -EINVAL;
   37.96 +        if ( op->u.read_memtype.reg < num_var_ranges )
   37.97 +        {
   37.98 +            mtrr_if->get(op->u.read_memtype.reg, &mfn, &nr_mfns, &type);
   37.99 +            op->u.read_memtype.mfn     = mfn;
  37.100 +            op->u.read_memtype.nr_mfns = nr_mfns;
  37.101 +            op->u.read_memtype.type    = type;
  37.102 +            (void)copy_to_guest(u_xenpf_op, op, 1);
  37.103 +            ret = 0;
  37.104 +        }
  37.105 +    }
  37.106 +    break;
  37.107 +
  37.108 +    case XENPF_microcode_update:
  37.109 +    {
  37.110 +        extern int microcode_update(void *buf, unsigned long len);
  37.111 +        ret = microcode_update(op->u.microcode.data.p,
  37.112 +                               op->u.microcode.length);
  37.113 +    }
  37.114 +    break;
  37.115 +
  37.116 +    case XENPF_platform_quirk:
  37.117 +    {
  37.118 +        extern int opt_noirqbalance;
  37.119 +        int quirk_id = op->u.platform_quirk.quirk_id;
  37.120 +        switch ( quirk_id )
  37.121 +        {
  37.122 +        case QUIRK_NOIRQBALANCING:
  37.123 +            printk("Platform quirk -- Disabling IRQ balancing/affinity.\n");
  37.124 +            opt_noirqbalance = 1;
  37.125 +            setup_ioapic_dest();
  37.126 +            break;
  37.127 +        case QUIRK_IOAPIC_BAD_REGSEL:
  37.128 +        case QUIRK_IOAPIC_GOOD_REGSEL:
  37.129 +#ifndef sis_apic_bug
  37.130 +            sis_apic_bug = (quirk_id == QUIRK_IOAPIC_BAD_REGSEL);
  37.131 +            DPRINTK("Domain 0 says that IO-APIC REGSEL is %s\n",
  37.132 +                    sis_apic_bug ? "bad" : "good");
  37.133 +#else
  37.134 +            BUG_ON(sis_apic_bug != (quirk_id == QUIRK_IOAPIC_BAD_REGSEL));
  37.135 +#endif
  37.136 +            break;
  37.137 +        default:
  37.138 +            ret = -EINVAL;
  37.139 +            break;
  37.140 +        }
  37.141 +    }
  37.142 +    break;
  37.143 +
  37.144 +    default:
  37.145 +        ret = -ENOSYS;
  37.146 +        break;
  37.147 +    }
  37.148 +
  37.149 +    spin_unlock(&xenpf_lock);
  37.150 +
  37.151 +    return ret;
  37.152 +}
  37.153 +
  37.154 +/*
  37.155 + * Local variables:
  37.156 + * mode: C
  37.157 + * c-set-style: "BSD"
  37.158 + * c-basic-offset: 4
  37.159 + * tab-width: 4
  37.160 + * indent-tabs-mode: nil
  37.161 + * End:
  37.162 + */
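On the dom0-kernel side, operations that used to travel as dom0_ops now go through the platform_op hypercall. A hedged fragment showing a wall-clock update via XENPF_settime; the HYPERVISOR_platform_op() wrapper name and the secs/nsecs/system_time locals are assumptions, not part of this changeset.

    /* Hypothetical dom0-kernel fragment (sketch only). */
    struct xen_platform_op op;
    int rc;

    op.cmd                   = XENPF_settime;
    op.interface_version     = XENPF_INTERFACE_VERSION;
    op.u.settime.secs        = secs;
    op.u.settime.nsecs       = nsecs;
    op.u.settime.system_time = system_time;

    rc = HYPERVISOR_platform_op(&op);  /* -EACCES on a version mismatch,
                                          -EPERM if the caller is not dom0,
                                          per the handler above */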
    38.1 --- a/xen/arch/x86/setup.c	Fri Aug 25 10:39:24 2006 +0100
    38.2 +++ b/xen/arch/x86/setup.c	Fri Aug 25 18:39:10 2006 +0100
    38.3 @@ -15,6 +15,7 @@
    38.4  #include <xen/version.h>
    38.5  #include <xen/gdbstub.h>
    38.6  #include <xen/percpu.h>
    38.7 +#include <xen/hypercall.h>
    38.8  #include <public/version.h>
    38.9  #include <asm/bitops.h>
   38.10  #include <asm/smp.h>
   38.11 @@ -416,9 +417,13 @@ void __init __start_xen(multiboot_info_t
   38.12             nr_pages << (PAGE_SHIFT - 10));
   38.13      total_pages = nr_pages;
   38.14  
   38.15 -    /* Sanity check for unwanted bloat of dom0_op structure. */
   38.16 -    BUILD_BUG_ON(sizeof(((struct dom0_op *)0)->u) !=
   38.17 -                 sizeof(((struct dom0_op *)0)->u.pad));
   38.18 +    /* Sanity check for unwanted bloat of certain hypercall structures. */
   38.19 +    BUILD_BUG_ON(sizeof(((struct xen_platform_op *)0)->u) !=
   38.20 +                 sizeof(((struct xen_platform_op *)0)->u.pad));
   38.21 +    BUILD_BUG_ON(sizeof(((struct xen_domctl *)0)->u) !=
   38.22 +                 sizeof(((struct xen_domctl *)0)->u.pad));
   38.23 +    BUILD_BUG_ON(sizeof(((struct xen_sysctl *)0)->u) !=
   38.24 +                 sizeof(((struct xen_sysctl *)0)->u.pad));
   38.25  
   38.26      BUILD_BUG_ON(sizeof(start_info_t) > PAGE_SIZE);
   38.27      BUILD_BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
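The replacement sanity checks follow the same idea as the old dom0_op check: each new hypercall structure carries a fixed-size pad member in its argument union, so comparing sizeof(u) with sizeof(u.pad) catches any member that would silently grow the structure and hence the ABI. An illustrative structure (a sketch, not the real public header layout) showing why the comparison works:

    struct example_op {
        uint32_t cmd;
        uint32_t interface_version;
        union {
            struct { uint64_t max_pfns; } getmemlist;
            uint8_t pad[128];            /* fixes the intended union size */
        } u;
    };

    /* Compiles to nothing normally; fails the build if any union member
     * outgrows the pad, i.e. if the structure size would change. */
    BUILD_BUG_ON(sizeof(((struct example_op *)0)->u) !=
                 sizeof(((struct example_op *)0)->u.pad));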
    39.1 --- a/xen/arch/x86/shadow2-common.c	Fri Aug 25 10:39:24 2006 +0100
    39.2 +++ b/xen/arch/x86/shadow2-common.c	Fri Aug 25 18:39:10 2006 +0100
    39.3 @@ -2951,14 +2951,15 @@ void shadow2_convert_to_log_dirty(struct
    39.4  
    39.5  /* Read a domain's log-dirty bitmap and stats.  
    39.6   * If the operation is a CLEAN, clear the bitmap and stats as well. */
    39.7 -static int shadow2_log_dirty_op(struct domain *d, dom0_shadow_control_t *sc)
    39.8 -{    
    39.9 +static int shadow2_log_dirty_op(
   39.10 +    struct domain *d, struct xen_domctl_shadow_op *sc)
   39.11 +{
   39.12      int i, rv = 0, clean = 0;
   39.13  
   39.14      domain_pause(d);
   39.15      shadow2_lock(d);
   39.16  
   39.17 -    clean = (sc->op == DOM0_SHADOW_CONTROL_OP_CLEAN);
   39.18 +    clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
   39.19  
   39.20      SHADOW2_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n", 
   39.21                    (clean) ? "clean" : "peek",
   39.22 @@ -3081,11 +3082,11 @@ void sh2_do_mark_dirty(struct domain *d,
   39.23  
   39.24  
   39.25  /**************************************************************************/
   39.26 -/* Shadow-control DOM0_OP dispatcher */
   39.27 -
   39.28 -int shadow2_control_op(struct domain *d, 
   39.29 -                       dom0_shadow_control_t *sc,
   39.30 -                       XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
   39.31 +/* Shadow-control XEN_DOMCTL dispatcher */
   39.32 +
   39.33 +int shadow2_domctl(struct domain *d, 
   39.34 +                   xen_domctl_shadow_op_t *sc,
   39.35 +                   XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
   39.36  {
   39.37      int rc, preempted = 0;
   39.38  
   39.39 @@ -3097,7 +3098,7 @@ int shadow2_control_op(struct domain *d,
   39.40  
   39.41      switch ( sc->op )
   39.42      {
   39.43 -    case DOM0_SHADOW_CONTROL_OP_OFF:
   39.44 +    case XEN_DOMCTL_SHADOW_OP_OFF:
   39.45          if ( shadow2_mode_log_dirty(d) )
   39.46              if ( (rc = shadow2_log_dirty_disable(d)) != 0 ) 
   39.47                  return rc;
   39.48 @@ -3106,34 +3107,34 @@ int shadow2_control_op(struct domain *d,
   39.49                  return rc;
   39.50          return 0;
   39.51  
   39.52 -    case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
   39.53 +    case XEN_DOMCTL_SHADOW_OP_ENABLE_TEST:
   39.54          return shadow2_test_enable(d);
   39.55  
   39.56 -    case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
   39.57 +    case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
   39.58          return shadow2_log_dirty_enable(d);
   39.59  
   39.60 -    case DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE:
   39.61 +    case XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE:
   39.62          return shadow2_enable(d, SHM2_refcounts|SHM2_translate);
   39.63  
   39.64 -    case DOM0_SHADOW_CONTROL_OP_CLEAN:
   39.65 -    case DOM0_SHADOW_CONTROL_OP_PEEK:
   39.66 +    case XEN_DOMCTL_SHADOW_OP_CLEAN:
   39.67 +    case XEN_DOMCTL_SHADOW_OP_PEEK:
   39.68          return shadow2_log_dirty_op(d, sc);
   39.69  
   39.70 -    case DOM0_SHADOW_CONTROL_OP_ENABLE:
   39.71 -        if ( sc->mode & DOM0_SHADOW_ENABLE_LOG_DIRTY )
   39.72 +    case XEN_DOMCTL_SHADOW_OP_ENABLE:
   39.73 +        if ( sc->mode & XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY )
   39.74              return shadow2_log_dirty_enable(d);
   39.75          return shadow2_enable(d, sc->mode << SHM2_shift);
   39.76  
   39.77 -    case DOM0_SHADOW_CONTROL_OP_GET_ALLOCATION:
   39.78 +    case XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
   39.79          sc->mb = shadow2_get_allocation(d);
   39.80          return 0;
   39.81  
   39.82 -    case DOM0_SHADOW_CONTROL_OP_SET_ALLOCATION:
   39.83 +    case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
   39.84          rc = shadow2_set_allocation(d, sc->mb, &preempted);
   39.85          if ( preempted )
   39.86              /* Not finished.  Set up to re-run the call. */
   39.87              rc = hypercall_create_continuation(
   39.88 -                __HYPERVISOR_dom0_op, "h", u_dom0_op);
   39.89 +                __HYPERVISOR_domctl, "h", u_domctl);
   39.90          else 
   39.91              /* Finished.  Return the new allocation */
   39.92              sc->mb = shadow2_get_allocation(d);
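The one behavioural subtlety in the shadow2 rename is the preemption path: when shadow2_set_allocation() cannot finish within its time slice, the handler queues the very same domctl as a hypercall continuation instead of returning a partial result, so the caller transparently re-enters the hypervisor and resumes. The "h" format string describes the single guest-handle argument of __HYPERVISOR_domctl. The pattern, restated as a sketch:

    /* Sketch of the continuation pattern used above. */
    if ( preempted )
        rc = hypercall_create_continuation(__HYPERVISOR_domctl,
                                           "h", u_domctl);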
    40.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    40.2 +++ b/xen/arch/x86/sysctl.c	Fri Aug 25 18:39:10 2006 +0100
    40.3 @@ -0,0 +1,77 @@
    40.4 +/******************************************************************************
    40.5 + * Arch-specific sysctl.c
    40.6 + * 
    40.7 + * System management operations. For use by node control stack.
    40.8 + * 
    40.9 + * Copyright (c) 2002-2006, K Fraser
   40.10 + */
   40.11 +
   40.12 +#include <xen/config.h>
   40.13 +#include <xen/types.h>
   40.14 +#include <xen/lib.h>
   40.15 +#include <xen/mm.h>
   40.16 +#include <xen/guest_access.h>
   40.17 +#include <public/sysctl.h>
   40.18 +#include <xen/sched.h>
   40.19 +#include <xen/event.h>
   40.20 +#include <xen/domain_page.h>
   40.21 +#include <asm/msr.h>
   40.22 +#include <xen/trace.h>
   40.23 +#include <xen/console.h>
   40.24 +#include <xen/iocap.h>
   40.25 +#include <asm/shadow.h>
   40.26 +#include <asm/irq.h>
   40.27 +#include <asm/hvm/hvm.h>
   40.28 +#include <asm/hvm/support.h>
   40.29 +#include <asm/processor.h>
   40.30 +
   40.31 +long arch_do_sysctl(
   40.32 +    struct xen_sysctl *sysctl, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
   40.33 +{
   40.34 +    long ret = 0;
   40.35 +
   40.36 +    switch ( sysctl->cmd )
   40.37 +    {
   40.38 +
   40.39 +    case XEN_SYSCTL_physinfo:
   40.40 +    {
   40.41 +        xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
   40.42 +
   40.43 +        pi->threads_per_core =
   40.44 +            cpus_weight(cpu_sibling_map[0]);
   40.45 +        pi->cores_per_socket =
   40.46 +            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
   40.47 +        pi->sockets_per_node = 
   40.48 +            num_online_cpus() / cpus_weight(cpu_core_map[0]);
   40.49 +
   40.50 +        pi->nr_nodes         = 1;
   40.51 +        pi->total_pages      = total_pages;
   40.52 +        pi->free_pages       = avail_domheap_pages();
   40.53 +        pi->scrub_pages      = avail_scrub_pages();
   40.54 +        pi->cpu_khz          = cpu_khz;
   40.55 +        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
   40.56 +        memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
   40.57 +        ret = 0;
   40.58 +        if ( copy_to_guest(u_sysctl, sysctl, 1) )
   40.59 +            ret = -EFAULT;
   40.60 +    }
   40.61 +    break;
   40.62 +    
   40.63 +
   40.64 +    default:
   40.65 +        ret = -ENOSYS;
   40.66 +        break;
   40.67 +    }
   40.68 +
   40.69 +    return ret;
   40.70 +}
   40.71 +
   40.72 +/*
   40.73 + * Local variables:
   40.74 + * mode: C
   40.75 + * c-set-style: "BSD"
   40.76 + * c-basic-offset: 4
   40.77 + * tab-width: 4
   40.78 + * indent-tabs-mode: nil
   40.79 + * End:
   40.80 + */
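On the tool side, the physinfo query that used to be a dom0_op becomes a one-shot sysctl. A hypothetical libxc-style call (the do_sysctl() wrapper is assumed and is expected to fill in the interface version; the field names come from the handler above):

    /* Sketch only -- not this changeset's tool API. */
    struct xen_sysctl sysctl;

    memset(&sysctl, 0, sizeof(sysctl));
    sysctl.cmd = XEN_SYSCTL_physinfo;

    if ( do_sysctl(xc_handle, &sysctl) == 0 )
        printf("%u thread(s)/core, %u core(s)/socket, %lu kHz\n",
               sysctl.u.physinfo.threads_per_core,
               sysctl.u.physinfo.cores_per_socket,
               (unsigned long)sysctl.u.physinfo.cpu_khz);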
    41.1 --- a/xen/arch/x86/x86_32/entry.S	Fri Aug 25 10:39:24 2006 +0100
    41.2 +++ b/xen/arch/x86/x86_32/entry.S	Fri Aug 25 18:39:10 2006 +0100
    41.3 @@ -630,7 +630,7 @@ ENTRY(hypercall_table)
    41.4          .long do_set_callbacks
    41.5          .long do_fpu_taskswitch     /*  5 */
    41.6          .long do_arch_sched_op_compat
    41.7 -        .long do_dom0_op
    41.8 +        .long do_platform_op
    41.9          .long do_set_debugreg
   41.10          .long do_get_debugreg
   41.11          .long do_update_descriptor  /* 10 */
   41.12 @@ -657,7 +657,9 @@ ENTRY(hypercall_table)
   41.13          .long do_xenoprof_op
   41.14          .long do_event_channel_op
   41.15          .long do_physdev_op
   41.16 -        .long do_hvm_op             /* 34 */
   41.17 +        .long do_hvm_op
   41.18 +        .long do_sysctl             /* 35 */
   41.19 +        .long do_domctl
   41.20          .rept NR_hypercalls-((.-hypercall_table)/4)
   41.21          .long do_ni_hypercall
   41.22          .endr
   41.23 @@ -670,7 +672,7 @@ ENTRY(hypercall_args_table)
   41.24          .byte 4 /* do_set_callbacks     */
   41.25          .byte 1 /* do_fpu_taskswitch    */  /*  5 */
   41.26          .byte 2 /* do_arch_sched_op_compat */
   41.27 -        .byte 1 /* do_dom0_op           */
   41.28 +        .byte 1 /* do_platform_op       */
   41.29          .byte 2 /* do_set_debugreg      */
   41.30          .byte 1 /* do_get_debugreg      */
   41.31          .byte 4 /* do_update_descriptor */  /* 10 */
   41.32 @@ -697,7 +699,9 @@ ENTRY(hypercall_args_table)
   41.33          .byte 2 /* do_xenoprof_op       */
   41.34          .byte 2 /* do_event_channel_op  */
   41.35          .byte 2 /* do_physdev_op        */
   41.36 -        .byte 2 /* do_hvm_op            */  /* 34 */
   41.37 +        .byte 2 /* do_hvm_op            */
   41.38 +        .byte 1 /* do_sysctl            */  /* 35 */
   41.39 +        .byte 1 /* do_domctl            */
   41.40          .rept NR_hypercalls-(.-hypercall_args_table)
   41.41          .byte 0 /* do_ni_hypercall      */
   41.42          .endr
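In the x86 tables, slot 7 (previously do_dom0_op) now dispatches do_platform_op, and slots 35 and 36 are newly assigned to do_sysctl and do_domctl. All three take a single guest-handle argument, hence the .byte 1 entries in hypercall_args_table. The corresponding C prototypes, as shown for do_platform_op in platform_hypercall.c above and assumed (same single-handle convention) for the two common-code handlers:

    long do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op);
    long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl);
    long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);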
    42.1 --- a/xen/arch/x86/x86_64/entry.S	Fri Aug 25 10:39:24 2006 +0100
    42.2 +++ b/xen/arch/x86/x86_64/entry.S	Fri Aug 25 18:39:10 2006 +0100
    42.3 @@ -543,7 +543,7 @@ ENTRY(hypercall_table)
    42.4          .quad do_set_callbacks
    42.5          .quad do_fpu_taskswitch     /*  5 */
    42.6          .quad do_arch_sched_op_compat
    42.7 -        .quad do_dom0_op
    42.8 +        .quad do_platform_op
    42.9          .quad do_set_debugreg
   42.10          .quad do_get_debugreg
   42.11          .quad do_update_descriptor  /* 10 */
   42.12 @@ -571,6 +571,8 @@ ENTRY(hypercall_table)
   42.13          .quad do_event_channel_op
   42.14          .quad do_physdev_op
   42.15          .quad do_hvm_op
   42.16 +        .quad do_sysctl             /* 35 */
   42.17 +        .quad do_domctl
   42.18          .rept NR_hypercalls-((.-hypercall_table)/8)
   42.19          .quad do_ni_hypercall
   42.20          .endr
   42.21 @@ -583,7 +585,7 @@ ENTRY(hypercall_args_table)
   42.22          .byte 3 /* do_set_callbacks     */
   42.23          .byte 1 /* do_fpu_taskswitch    */  /*  5 */
   42.24          .byte 2 /* do_arch_sched_op_compat */
   42.25 -        .byte 1 /* do_dom0_op           */
   42.26 +        .byte 1 /* do_platform_op       */
   42.27          .byte 2 /* do_set_debugreg      */
   42.28          .byte 1 /* do_get_debugreg      */
   42.29          .byte 2 /* do_update_descriptor */  /* 10 */
   42.30 @@ -611,6 +613,8 @@ ENTRY(hypercall_args_table)
   42.31          .byte 2 /* do_event_channel_op  */
   42.32          .byte 2 /* do_physdev_op        */
   42.33          .byte 2 /* do_hvm_op            */
   42.34 +        .byte 1 /* do_sysctl            */  /* 35 */
   42.35 +        .byte 1 /* do_domctl            */
   42.36          .rept NR_hypercalls-(.-hypercall_args_table)
   42.37          .byte 0 /* do_ni_hypercall      */
   42.38          .endr
    43.1 --- a/xen/common/Makefile	Fri Aug 25 10:39:24 2006 +0100
    43.2 +++ b/xen/common/Makefile	Fri Aug 25 18:39:10 2006 +0100
    43.3 @@ -1,6 +1,6 @@
    43.4  obj-y += acm_ops.o
    43.5  obj-y += bitmap.o
    43.6 -obj-y += dom0_ops.o
    43.7 +obj-y += domctl.o
    43.8  obj-y += domain.o
    43.9  obj-y += elf.o
   43.10  obj-y += event_channel.o
   43.11 @@ -19,6 +19,7 @@ obj-y += shutdown.o
   43.12  obj-y += softirq.o
   43.13  obj-y += string.o
   43.14  obj-y += symbols.o
   43.15 +obj-y += sysctl.o
   43.16  obj-y += trace.o
   43.17  obj-y += timer.o
   43.18  obj-y += version.o
    44.1 --- a/xen/common/acm_ops.c	Fri Aug 25 10:39:24 2006 +0100
    44.2 +++ b/xen/common/acm_ops.c	Fri Aug 25 18:39:10 2006 +0100
    44.3 @@ -26,7 +26,6 @@
    44.4  #include <xen/trace.h>
    44.5  #include <xen/console.h>
    44.6  #include <xen/guest_access.h>
    44.7 -#include <public/sched_ctl.h>
    44.8  #include <acm/acm_hooks.h>
    44.9  
   44.10  #ifndef ACM_SECURITY
    45.1 --- a/xen/common/dom0_ops.c	Fri Aug 25 10:39:24 2006 +0100
    45.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    45.3 @@ -1,739 +0,0 @@
    45.4 -/******************************************************************************
    45.5 - * dom0_ops.c
    45.6 - * 
    45.7 - * Process command requests from domain-0 guest OS.
    45.8 - * 
    45.9 - * Copyright (c) 2002, K A Fraser
   45.10 - */
   45.11 -
   45.12 -#include <xen/config.h>
   45.13 -#include <xen/types.h>
   45.14 -#include <xen/lib.h>
   45.15 -#include <xen/mm.h>
   45.16 -#include <xen/sched.h>
   45.17 -#include <xen/domain.h>
   45.18 -#include <xen/event.h>
   45.19 -#include <xen/domain_page.h>
   45.20 -#include <xen/trace.h>
   45.21 -#include <xen/console.h>
   45.22 -#include <xen/iocap.h>
   45.23 -#include <xen/guest_access.h>
   45.24 -#include <asm/current.h>
   45.25 -#include <public/dom0_ops.h>
   45.26 -#include <public/sched_ctl.h>
   45.27 -#include <acm/acm_hooks.h>
   45.28 -
   45.29 -extern long arch_do_dom0_op(
   45.30 -    struct dom0_op *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op);
   45.31 -extern void arch_getdomaininfo_ctxt(
   45.32 -    struct vcpu *, struct vcpu_guest_context *);
   45.33 -
   45.34 -static inline int is_free_domid(domid_t dom)
   45.35 -{
   45.36 -    struct domain *d;
   45.37 -
   45.38 -    if ( dom >= DOMID_FIRST_RESERVED )
   45.39 -        return 0;
   45.40 -
   45.41 -    if ( (d = find_domain_by_id(dom)) == NULL )
   45.42 -        return 1;
   45.43 -
   45.44 -    put_domain(d);
   45.45 -    return 0;
   45.46 -}
   45.47 -
   45.48 -static void getdomaininfo(struct domain *d, dom0_getdomaininfo_t *info)
   45.49 -{
   45.50 -    struct vcpu   *v;
   45.51 -    u64 cpu_time = 0;
   45.52 -    int flags = DOMFLAGS_BLOCKED;
   45.53 -    struct vcpu_runstate_info runstate;
   45.54 -    
   45.55 -    info->domain = d->domain_id;
   45.56 -    info->nr_online_vcpus = 0;
   45.57 -    
   45.58 -    /* 
   45.59 -     * - domain is marked as blocked only if all its vcpus are blocked
   45.60 -     * - domain is marked as running if any of its vcpus is running
   45.61 -     */
   45.62 -    for_each_vcpu ( d, v ) {
   45.63 -        vcpu_runstate_get(v, &runstate);
   45.64 -        cpu_time += runstate.time[RUNSTATE_running];
   45.65 -        info->max_vcpu_id = v->vcpu_id;
   45.66 -        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
   45.67 -        {
   45.68 -            if ( !(v->vcpu_flags & VCPUF_blocked) )
   45.69 -                flags &= ~DOMFLAGS_BLOCKED;
   45.70 -            if ( v->vcpu_flags & VCPUF_running )
   45.71 -                flags |= DOMFLAGS_RUNNING;
   45.72 -            info->nr_online_vcpus++;
   45.73 -        }
   45.74 -    }
   45.75 -    
   45.76 -    info->cpu_time = cpu_time;
   45.77 -    
   45.78 -    info->flags = flags |
   45.79 -        ((d->domain_flags & DOMF_dying)      ? DOMFLAGS_DYING    : 0) |
   45.80 -        ((d->domain_flags & DOMF_shutdown)   ? DOMFLAGS_SHUTDOWN : 0) |
   45.81 -        ((d->domain_flags & DOMF_ctrl_pause) ? DOMFLAGS_PAUSED   : 0) |
   45.82 -        d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
   45.83 -
   45.84 -    if (d->ssid != NULL)
   45.85 -        info->ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
   45.86 -    else    
   45.87 -        info->ssidref = ACM_DEFAULT_SSID;
   45.88 -    
   45.89 -    info->tot_pages         = d->tot_pages;
   45.90 -    info->max_pages         = d->max_pages;
   45.91 -    info->shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT;
   45.92 -
   45.93 -    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
   45.94 -}
   45.95 -
   45.96 -static unsigned int default_vcpu0_location(void)
   45.97 -{
   45.98 -    struct domain *d;
   45.99 -    struct vcpu   *v;
  45.100 -    unsigned int   i, cpu, cnt[NR_CPUS] = { 0 };
  45.101 -    cpumask_t      cpu_exclude_map;
  45.102 -
  45.103 -    /* Do an initial CPU placement. Pick the least-populated CPU. */
  45.104 -    read_lock(&domlist_lock);
  45.105 -    for_each_domain ( d )
  45.106 -        for_each_vcpu ( d, v )
  45.107 -        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
  45.108 -            cnt[v->processor]++;
  45.109 -    read_unlock(&domlist_lock);
  45.110 -
  45.111 -    /*
  45.112 -     * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
  45.113 -     * favour high numbered CPUs in the event of a tie.
  45.114 -     */
  45.115 -    cpu = first_cpu(cpu_sibling_map[0]);
  45.116 -    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
  45.117 -        cpu = next_cpu(cpu, cpu_sibling_map[0]);
  45.118 -    cpu_exclude_map = cpu_sibling_map[0];
  45.119 -    for_each_online_cpu ( i )
  45.120 -    {
  45.121 -        if ( cpu_isset(i, cpu_exclude_map) )
  45.122 -            continue;
  45.123 -        if ( (i == first_cpu(cpu_sibling_map[i])) &&
  45.124 -             (cpus_weight(cpu_sibling_map[i]) > 1) )
  45.125 -            continue;
  45.126 -        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
  45.127 -        if ( cnt[i] <= cnt[cpu] )
  45.128 -            cpu = i;
  45.129 -    }
  45.130 -
  45.131 -    return cpu;
  45.132 -}
  45.133 -
  45.134 -long do_dom0_op(XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
  45.135 -{
  45.136 -    long ret = 0;
  45.137 -    struct dom0_op curop, *op = &curop;
  45.138 -    void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
  45.139 -    static DEFINE_SPINLOCK(dom0_lock);
  45.140 -
  45.141 -    if ( !IS_PRIV(current->domain) )
  45.142 -        return -EPERM;
  45.143 -
  45.144 -    if ( copy_from_guest(op, u_dom0_op, 1) )
  45.145 -        return -EFAULT;
  45.146 -
  45.147 -    if ( (op->interface_version != DOM0_TOOLS_INTERFACE_VERSION) &&
  45.148 -         (op->interface_version != DOM0_KERNEL_INTERFACE_VERSION) )
  45.149 -        return -EACCES;
  45.150 -
  45.151 -    if ( acm_pre_dom0_op(op, &ssid) )
  45.152 -        return -EPERM;
  45.153 -
  45.154 -    spin_lock(&dom0_lock);
  45.155 -
  45.156 -    switch ( op->cmd )
  45.157 -    {
  45.158 -
  45.159 -    case DOM0_SETVCPUCONTEXT:
  45.160 -    {
  45.161 -        struct domain *d = find_domain_by_id(op->u.setvcpucontext.domain);
  45.162 -        ret = -ESRCH;
  45.163 -        if ( d != NULL )
  45.164 -        {
  45.165 -            ret = set_info_guest(d, &op->u.setvcpucontext);
  45.166 -            put_domain(d);
  45.167 -        }
  45.168 -    }
  45.169 -    break;
  45.170 -
  45.171 -    case DOM0_PAUSEDOMAIN:
  45.172 -    {
  45.173 -        struct domain *d = find_domain_by_id(op->u.pausedomain.domain);
  45.174 -        ret = -ESRCH;
  45.175 -        if ( d != NULL )
  45.176 -        {
  45.177 -            ret = -EINVAL;
  45.178 -            if ( d != current->domain )
  45.179 -            {
  45.180 -                domain_pause_by_systemcontroller(d);
  45.181 -                ret = 0;
  45.182 -            }
  45.183 -            put_domain(d);
  45.184 -        }
  45.185 -    }
  45.186 -    break;
  45.187 -
  45.188 -    case DOM0_UNPAUSEDOMAIN:
  45.189 -    {
  45.190 -        struct domain *d = find_domain_by_id(op->u.unpausedomain.domain);
  45.191 -        ret = -ESRCH;
  45.192 -        if ( d != NULL )
  45.193 -        {
  45.194 -            ret = -EINVAL;
  45.195 -            if ( (d != current->domain) && (d->vcpu[0] != NULL) &&
  45.196 -                 test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
  45.197 -            {
  45.198 -                domain_unpause_by_systemcontroller(d);
  45.199 -                ret = 0;
  45.200 -            }
  45.201 -            put_domain(d);
  45.202 -        }
  45.203 -    }
  45.204 -    break;
  45.205 -
  45.206 -    case DOM0_CREATEDOMAIN:
  45.207 -    {
  45.208 -        struct domain *d;
  45.209 -        domid_t        dom;
  45.210 -        static domid_t rover = 0;
  45.211 -
  45.212 -        /*
  45.213 -         * Running the domain 0 kernel in ring 0 is not compatible
  45.214 -         * with multiple guests.
  45.215 -         */
  45.216 -        if ( supervisor_mode_kernel )
  45.217 -            return -EINVAL;
  45.218 -
  45.219 -        dom = op->u.createdomain.domain;
  45.220 -        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
  45.221 -        {
  45.222 -            ret = -EINVAL;
  45.223 -            if ( !is_free_domid(dom) )
  45.224 -                break;
  45.225 -        }
  45.226 -        else
  45.227 -        {
  45.228 -            for ( dom = rover + 1; dom != rover; dom++ )
  45.229 -            {
  45.230 -                if ( dom == DOMID_FIRST_RESERVED )
  45.231 -                    dom = 0;
  45.232 -                if ( is_free_domid(dom) )
  45.233 -                    break;
  45.234 -            }
  45.235 -
  45.236 -            ret = -ENOMEM;
  45.237 -            if ( dom == rover )
  45.238 -                break;
  45.239 -
  45.240 -            rover = dom;
  45.241 -        }
  45.242 -
  45.243 -        ret = -ENOMEM;
  45.244 -        if ( (d = domain_create(dom)) == NULL )
  45.245 -            break;
  45.246 -
  45.247 -        memcpy(d->handle, op->u.createdomain.handle,
  45.248 -               sizeof(xen_domain_handle_t));
  45.249 -
  45.250 -        ret = 0;
  45.251 -
  45.252 -        op->u.createdomain.domain = d->domain_id;
  45.253 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.254 -            ret = -EFAULT;
  45.255 -    }
  45.256 -    break;
  45.257 -
  45.258 -    case DOM0_MAX_VCPUS:
  45.259 -    {
  45.260 -        struct domain *d;
  45.261 -        unsigned int i, max = op->u.max_vcpus.max, cpu;
  45.262 -
  45.263 -        ret = -EINVAL;
  45.264 -        if ( max > MAX_VIRT_CPUS )
  45.265 -            break;
  45.266 -
  45.267 -        ret = -ESRCH;
  45.268 -        if ( (d = find_domain_by_id(op->u.max_vcpus.domain)) == NULL )
  45.269 -            break;
  45.270 -
  45.271 -        /* Needed, for example, to ensure writable p.t. state is synced. */
  45.272 -        domain_pause(d);
  45.273 -
  45.274 -        /* We cannot reduce maximum VCPUs. */
  45.275 -        ret = -EINVAL;
  45.276 -        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
  45.277 -            goto maxvcpu_out;
  45.278 -
  45.279 -        ret = -ENOMEM;
  45.280 -        for ( i = 0; i < max; i++ )
  45.281 -        {
  45.282 -            if ( d->vcpu[i] != NULL )
  45.283 -                continue;
  45.284 -
  45.285 -            cpu = (i == 0) ?
  45.286 -                default_vcpu0_location() :
  45.287 -                (d->vcpu[i-1]->processor + 1) % num_online_cpus();
  45.288 -
  45.289 -            if ( alloc_vcpu(d, i, cpu) == NULL )
  45.290 -                goto maxvcpu_out;
  45.291 -        }
  45.292 -
  45.293 -        ret = 0;
  45.294 -
  45.295 -    maxvcpu_out:
  45.296 -        domain_unpause(d);
  45.297 -        put_domain(d);
  45.298 -    }
  45.299 -    break;
  45.300 -
  45.301 -    case DOM0_DESTROYDOMAIN:
  45.302 -    {
  45.303 -        struct domain *d = find_domain_by_id(op->u.destroydomain.domain);
  45.304 -        ret = -ESRCH;
  45.305 -        if ( d != NULL )
  45.306 -        {
  45.307 -            ret = -EINVAL;
  45.308 -            if ( d != current->domain )
  45.309 -            {
  45.310 -                domain_kill(d);
  45.311 -                ret = 0;
  45.312 -            }
  45.313 -            put_domain(d);
  45.314 -        }
  45.315 -    }
  45.316 -    break;
  45.317 -
  45.318 -    case DOM0_SETVCPUAFFINITY:
  45.319 -    {
  45.320 -        domid_t dom = op->u.setvcpuaffinity.domain;
  45.321 -        struct domain *d = find_domain_by_id(dom);
  45.322 -        struct vcpu *v;
  45.323 -        cpumask_t new_affinity;
  45.324 -
  45.325 -        if ( d == NULL )
  45.326 -        {
  45.327 -            ret = -ESRCH;            
  45.328 -            break;
  45.329 -        }
  45.330 -        
  45.331 -        if ( (op->u.setvcpuaffinity.vcpu >= MAX_VIRT_CPUS) ||
  45.332 -             !d->vcpu[op->u.setvcpuaffinity.vcpu] )
  45.333 -        {
  45.334 -            ret = -EINVAL;
  45.335 -            put_domain(d);
  45.336 -            break;
  45.337 -        }
  45.338 -
  45.339 -        v = d->vcpu[op->u.setvcpuaffinity.vcpu];
  45.340 -        if ( v == NULL )
  45.341 -        {
  45.342 -            ret = -ESRCH;
  45.343 -            put_domain(d);
  45.344 -            break;
  45.345 -        }
  45.346 -
  45.347 -        if ( v == current )
  45.348 -        {
  45.349 -            ret = -EINVAL;
  45.350 -            put_domain(d);
  45.351 -            break;
  45.352 -        }
  45.353 -
  45.354 -        new_affinity = v->cpu_affinity;
  45.355 -        memcpy(cpus_addr(new_affinity),
  45.356 -               &op->u.setvcpuaffinity.cpumap,
  45.357 -               min((int)(BITS_TO_LONGS(NR_CPUS) * sizeof(long)),
  45.358 -                   (int)sizeof(op->u.setvcpuaffinity.cpumap)));
  45.359 -
  45.360 -        ret = vcpu_set_affinity(v, &new_affinity);
  45.361 -
  45.362 -        put_domain(d);
  45.363 -    }
  45.364 -    break;
  45.365 -
  45.366 -    case DOM0_SCHEDCTL:
  45.367 -    {
  45.368 -        ret = sched_ctl(&op->u.schedctl);
  45.369 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.370 -            ret = -EFAULT;
  45.371 -    }
  45.372 -    break;
  45.373 -
  45.374 -    case DOM0_ADJUSTDOM:
  45.375 -    {
  45.376 -        ret = sched_adjdom(&op->u.adjustdom);
  45.377 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.378 -            ret = -EFAULT;
  45.379 -    }
  45.380 -    break;
  45.381 -
  45.382 -    case DOM0_GETDOMAININFO:
  45.383 -    { 
  45.384 -        struct domain *d;
  45.385 -        domid_t dom;
  45.386 -
  45.387 -        dom = op->u.getdomaininfo.domain;
  45.388 -        if ( dom == DOMID_SELF )
  45.389 -            dom = current->domain->domain_id;
  45.390 -
  45.391 -        read_lock(&domlist_lock);
  45.392 -
  45.393 -        for_each_domain ( d )
  45.394 -        {
  45.395 -            if ( d->domain_id >= dom )
  45.396 -                break;
  45.397 -        }
  45.398 -
  45.399 -        if ( (d == NULL) || !get_domain(d) )
  45.400 -        {
  45.401 -            read_unlock(&domlist_lock);
  45.402 -            ret = -ESRCH;
  45.403 -            break;
  45.404 -        }
  45.405 -
  45.406 -        read_unlock(&domlist_lock);
  45.407 -
  45.408 -        getdomaininfo(d, &op->u.getdomaininfo);
  45.409 -
  45.410 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.411 -            ret = -EFAULT;
  45.412 -
  45.413 -        put_domain(d);
  45.414 -    }
  45.415 -    break;
  45.416 -
  45.417 -    case DOM0_GETDOMAININFOLIST:
  45.418 -    { 
  45.419 -        struct domain *d;
  45.420 -        dom0_getdomaininfo_t info;
  45.421 -        u32 num_domains = 0;
  45.422 -
  45.423 -        read_lock(&domlist_lock);
  45.424 -
  45.425 -        for_each_domain ( d )
  45.426 -        {
  45.427 -            if ( d->domain_id < op->u.getdomaininfolist.first_domain )
  45.428 -                continue;
  45.429 -            if ( num_domains == op->u.getdomaininfolist.max_domains )
  45.430 -                break;
  45.431 -            if ( (d == NULL) || !get_domain(d) )
  45.432 -            {
  45.433 -                ret = -ESRCH;
  45.434 -                break;
  45.435 -            }
  45.436 -
  45.437 -            getdomaininfo(d, &info);
  45.438 -
  45.439 -            put_domain(d);
  45.440 -
  45.441 -            if ( copy_to_guest_offset(op->u.getdomaininfolist.buffer,
  45.442 -                                      num_domains, &info, 1) )
  45.443 -            {
  45.444 -                ret = -EFAULT;
  45.445 -                break;
  45.446 -            }
  45.447 -            
  45.448 -            num_domains++;
  45.449 -        }
  45.450 -        
  45.451 -        read_unlock(&domlist_lock);
  45.452 -        
  45.453 -        if ( ret != 0 )
  45.454 -            break;
  45.455 -        
  45.456 -        op->u.getdomaininfolist.num_domains = num_domains;
  45.457 -
  45.458 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.459 -            ret = -EFAULT;
  45.460 -    }
  45.461 -    break;
  45.462 -
  45.463 -    case DOM0_GETVCPUCONTEXT:
  45.464 -    { 
  45.465 -        struct vcpu_guest_context *c;
  45.466 -        struct domain             *d;
  45.467 -        struct vcpu               *v;
  45.468 -
  45.469 -        ret = -ESRCH;
  45.470 -        if ( (d = find_domain_by_id(op->u.getvcpucontext.domain)) == NULL )
  45.471 -            break;
  45.472 -
  45.473 -        ret = -EINVAL;
  45.474 -        if ( op->u.getvcpucontext.vcpu >= MAX_VIRT_CPUS )
  45.475 -            goto getvcpucontext_out;
  45.476 -
  45.477 -        ret = -ESRCH;
  45.478 -        if ( (v = d->vcpu[op->u.getvcpucontext.vcpu]) == NULL )
  45.479 -            goto getvcpucontext_out;
  45.480 -
  45.481 -        ret = -ENODATA;
  45.482 -        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
  45.483 -            goto getvcpucontext_out;
  45.484 -
  45.485 -        ret = -ENOMEM;
  45.486 -        if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
  45.487 -            goto getvcpucontext_out;
  45.488 -
  45.489 -        if ( v != current )
  45.490 -            vcpu_pause(v);
  45.491 -
  45.492 -        arch_getdomaininfo_ctxt(v,c);
  45.493 -        ret = 0;
  45.494 -
  45.495 -        if ( v != current )
  45.496 -            vcpu_unpause(v);
  45.497 -
  45.498 -        if ( copy_to_guest(op->u.getvcpucontext.ctxt, c, 1) )
  45.499 -            ret = -EFAULT;
  45.500 -
  45.501 -        xfree(c);
  45.502 -
  45.503 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.504 -            ret = -EFAULT;
  45.505 -
  45.506 -    getvcpucontext_out:
  45.507 -        put_domain(d);
  45.508 -    }
  45.509 -    break;
  45.510 -
  45.511 -    case DOM0_GETVCPUINFO:
  45.512 -    { 
  45.513 -        struct domain *d;
  45.514 -        struct vcpu   *v;
  45.515 -        struct vcpu_runstate_info runstate;
  45.516 -
  45.517 -        ret = -ESRCH;
  45.518 -        if ( (d = find_domain_by_id(op->u.getvcpuinfo.domain)) == NULL )
  45.519 -            break;
  45.520 -
  45.521 -        ret = -EINVAL;
  45.522 -        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
  45.523 -            goto getvcpuinfo_out;
  45.524 -
  45.525 -        ret = -ESRCH;
  45.526 -        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
  45.527 -            goto getvcpuinfo_out;
  45.528 -
  45.529 -        vcpu_runstate_get(v, &runstate);
  45.530 -
  45.531 -        op->u.getvcpuinfo.online   = !test_bit(_VCPUF_down, &v->vcpu_flags);
  45.532 -        op->u.getvcpuinfo.blocked  = test_bit(_VCPUF_blocked, &v->vcpu_flags);
  45.533 -        op->u.getvcpuinfo.running  = test_bit(_VCPUF_running, &v->vcpu_flags);
  45.534 -        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
  45.535 -        op->u.getvcpuinfo.cpu      = v->processor;
  45.536 -        op->u.getvcpuinfo.cpumap   = 0;
  45.537 -        memcpy(&op->u.getvcpuinfo.cpumap,
  45.538 -               cpus_addr(v->cpu_affinity),
  45.539 -               min((int)(BITS_TO_LONGS(NR_CPUS) * sizeof(long)),
  45.540 -                   (int)sizeof(op->u.getvcpuinfo.cpumap)));
  45.541 -        ret = 0;
  45.542 -
  45.543 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.544 -            ret = -EFAULT;
  45.545 -
  45.546 -    getvcpuinfo_out:
  45.547 -        put_domain(d);
  45.548 -    }
  45.549 -    break;
  45.550 -
  45.551 -    case DOM0_SETTIME:
  45.552 -    {
  45.553 -        do_settime(op->u.settime.secs, 
  45.554 -                   op->u.settime.nsecs, 
  45.555 -                   op->u.settime.system_time);
  45.556 -        ret = 0;
  45.557 -    }
  45.558 -    break;
  45.559 -
  45.560 -    case DOM0_TBUFCONTROL:
  45.561 -    {
  45.562 -        ret = tb_control(&op->u.tbufcontrol);
  45.563 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.564 -            ret = -EFAULT;
  45.565 -    }
  45.566 -    break;
  45.567 -    
  45.568 -    case DOM0_READCONSOLE:
  45.569 -    {
  45.570 -        ret = read_console_ring(
  45.571 -            op->u.readconsole.buffer, 
  45.572 -            &op->u.readconsole.count,
  45.573 -            op->u.readconsole.clear);
  45.574 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.575 -            ret = -EFAULT;
  45.576 -    }
  45.577 -    break;
  45.578 -
  45.579 -    case DOM0_SCHED_ID:
  45.580 -    {
  45.581 -        op->u.sched_id.sched_id = sched_id();
  45.582 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.583 -            ret = -EFAULT;
  45.584 -        else
  45.585 -            ret = 0;
  45.586 -    }
  45.587 -    break;
  45.588 -
  45.589 -    case DOM0_SETDOMAINMAXMEM:
  45.590 -    {
  45.591 -        struct domain *d;
  45.592 -        unsigned long new_max;
  45.593 -
  45.594 -        ret = -ESRCH;
  45.595 -        d = find_domain_by_id(op->u.setdomainmaxmem.domain);
  45.596 -        if ( d == NULL )
  45.597 -            break;
  45.598 -
  45.599 -        ret = -EINVAL;
  45.600 -        new_max = op->u.setdomainmaxmem.max_memkb >> (PAGE_SHIFT-10);
  45.601 -
  45.602 -        spin_lock(&d->page_alloc_lock);
  45.603 -        if ( new_max >= d->tot_pages )
  45.604 -        {
  45.605 -            d->max_pages = new_max;
  45.606 -            ret = 0;
  45.607 -        }
  45.608 -        spin_unlock(&d->page_alloc_lock);
  45.609 -
  45.610 -        put_domain(d);
  45.611 -    }
  45.612 -    break;
  45.613 -
  45.614 -    case DOM0_SETDOMAINHANDLE:
  45.615 -    {
  45.616 -        struct domain *d;
  45.617 -        ret = -ESRCH;
  45.618 -        d = find_domain_by_id(op->u.setdomainhandle.domain);
  45.619 -        if ( d != NULL )
  45.620 -        {
  45.621 -            memcpy(d->handle, op->u.setdomainhandle.handle,
  45.622 -                   sizeof(xen_domain_handle_t));
  45.623 -            put_domain(d);
  45.624 -            ret = 0;
  45.625 -        }
  45.626 -    }
  45.627 -    break;
  45.628 -
  45.629 -    case DOM0_SETDEBUGGING:
  45.630 -    {
  45.631 -        struct domain *d;
  45.632 -        ret = -ESRCH;
  45.633 -        d = find_domain_by_id(op->u.setdebugging.domain);
  45.634 -        if ( d != NULL )
  45.635 -        {
  45.636 -            if ( op->u.setdebugging.enable )
  45.637 -                set_bit(_DOMF_debugging, &d->domain_flags);
  45.638 -            else
  45.639 -                clear_bit(_DOMF_debugging, &d->domain_flags);
  45.640 -            put_domain(d);
  45.641 -            ret = 0;
  45.642 -        }
  45.643 -    }
  45.644 -    break;
  45.645 -
  45.646 -    case DOM0_IRQ_PERMISSION:
  45.647 -    {
  45.648 -        struct domain *d;
  45.649 -        unsigned int pirq = op->u.irq_permission.pirq;
  45.650 -
  45.651 -        ret = -EINVAL;
  45.652 -        if ( pirq >= NR_IRQS )
  45.653 -            break;
  45.654 -
  45.655 -        ret = -ESRCH;
  45.656 -        d = find_domain_by_id(op->u.irq_permission.domain);
  45.657 -        if ( d == NULL )
  45.658 -            break;
  45.659 -
  45.660 -        if ( op->u.irq_permission.allow_access )
  45.661 -            ret = irq_permit_access(d, pirq);
  45.662 -        else
  45.663 -            ret = irq_deny_access(d, pirq);
  45.664 -
  45.665 -        put_domain(d);
  45.666 -    }
  45.667 -    break;
  45.668 -
  45.669 -    case DOM0_IOMEM_PERMISSION:
  45.670 -    {
  45.671 -        struct domain *d;
  45.672 -        unsigned long mfn = op->u.iomem_permission.first_mfn;
  45.673 -        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;
  45.674 -
  45.675 -        ret = -EINVAL;
  45.676 -        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
  45.677 -            break;
  45.678 -
  45.679 -        ret = -ESRCH;
  45.680 -        d = find_domain_by_id(op->u.iomem_permission.domain);
  45.681 -        if ( d == NULL )
  45.682 -            break;
  45.683 -
  45.684 -        if ( op->u.iomem_permission.allow_access )
  45.685 -            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
  45.686 -        else
  45.687 -            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
  45.688 -
  45.689 -        put_domain(d);
  45.690 -    }
  45.691 -    break;
  45.692 -
  45.693 -#ifdef PERF_COUNTERS
  45.694 -    case DOM0_PERFCCONTROL:
  45.695 -    {
  45.696 -        extern int perfc_control(dom0_perfccontrol_t *);
  45.697 -        ret = perfc_control(&op->u.perfccontrol);
  45.698 -        if ( copy_to_guest(u_dom0_op, op, 1) )
  45.699 -            ret = -EFAULT;
  45.700 -    }
  45.701 -    break;
  45.702 -#endif
  45.703 -
  45.704 -    case DOM0_SETTIMEOFFSET:
  45.705 -    {
  45.706 -        struct domain *d;
  45.707 -
  45.708 -        ret = -ESRCH;
  45.709 -        d = find_domain_by_id(op->u.settimeoffset.domain);
  45.710 -        if ( d != NULL )
  45.711 -        {
  45.712 -            d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds;
  45.713 -            put_domain(d);
  45.714 -            ret = 0;
  45.715 -        }
  45.716 -    }
  45.717 -    break;
  45.718 -
  45.719 -    default:
  45.720 -        ret = arch_do_dom0_op(op, u_dom0_op);
  45.721 -        break;
  45.722 -    }
  45.723 -
  45.724 -    spin_unlock(&dom0_lock);
  45.725 -
  45.726 -    if (!ret)
  45.727 -        acm_post_dom0_op(op, &ssid);
  45.728 -    else
  45.729 -        acm_fail_dom0_op(op, &ssid);
  45.730 -
  45.731 -    return ret;
  45.732 -}
  45.733 -
  45.734 -/*
  45.735 - * Local variables:
  45.736 - * mode: C
  45.737 - * c-set-style: "BSD"
  45.738 - * c-basic-offset: 4
  45.739 - * tab-width: 4
  45.740 - * indent-tabs-mode: nil
  45.741 - * End:
  45.742 - */
    46.1 --- a/xen/common/domain.c	Fri Aug 25 10:39:24 2006 +0100
    46.2 +++ b/xen/common/domain.c	Fri Aug 25 18:39:10 2006 +0100
    46.3 @@ -23,7 +23,6 @@
    46.4  #include <xen/shutdown.h>
    46.5  #include <xen/percpu.h>
    46.6  #include <asm/debugger.h>
    46.7 -#include <public/dom0_ops.h>
    46.8  #include <public/sched.h>
    46.9  #include <public/vcpu.h>
   46.10  
   46.11 @@ -454,11 +453,12 @@ void domain_unpause_by_systemcontroller(
   46.12   * of domains other than domain 0. ie. the domains that are being built by 
   46.13   * the userspace dom0 domain builder.
   46.14   */
   46.15 -int set_info_guest(struct domain *d, dom0_setvcpucontext_t *setvcpucontext)
   46.16 +int set_info_guest(struct domain *d,
   46.17 +                   xen_domctl_vcpucontext_t *vcpucontext)
   46.18  {
   46.19      int rc = 0;
   46.20      struct vcpu_guest_context *c = NULL;
   46.21 -    unsigned long vcpu = setvcpucontext->vcpu;
   46.22 +    unsigned long vcpu = vcpucontext->vcpu;
   46.23      struct vcpu *v;
   46.24  
   46.25      if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
   46.26 @@ -470,7 +470,7 @@ int set_info_guest(struct domain *d, dom
   46.27      domain_pause(d);
   46.28  
   46.29      rc = -EFAULT;
   46.30 -    if ( copy_from_guest(c, setvcpucontext->ctxt, 1) == 0 )
   46.31 +    if ( copy_from_guest(c, vcpucontext->ctxt, 1) == 0 )
   46.32          rc = arch_set_info_guest(v, c);
   46.33  
   46.34      domain_unpause(d);
    47.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    47.2 +++ b/xen/common/domctl.c	Fri Aug 25 18:39:10 2006 +0100
    47.3 @@ -0,0 +1,681 @@
    47.4 +/******************************************************************************
    47.5 + * domctl.c
    47.6 + * 
    47.7 + * Domain management operations. For use by node control stack.
    47.8 + * 
    47.9 + * Copyright (c) 2002-2006, K A Fraser
   47.10 + */
   47.11 +
   47.12 +#include <xen/config.h>
   47.13 +#include <xen/types.h>
   47.14 +#include <xen/lib.h>
   47.15 +#include <xen/mm.h>
   47.16 +#include <xen/sched.h>
   47.17 +#include <xen/domain.h>
   47.18 +#include <xen/event.h>
   47.19 +#include <xen/domain_page.h>
   47.20 +#include <xen/trace.h>
   47.21 +#include <xen/console.h>
   47.22 +#include <xen/iocap.h>
   47.23 +#include <xen/guest_access.h>
   47.24 +#include <asm/current.h>
   47.25 +#include <public/domctl.h>
   47.26 +#include <acm/acm_hooks.h>
   47.27 +
   47.28 +extern long arch_do_domctl(
   47.29 +    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
   47.30 +extern void arch_getdomaininfo_ctxt(
   47.31 +    struct vcpu *, struct vcpu_guest_context *);
   47.32 +
   47.33 +void cpumask_to_xenctl_cpumap(
   47.34 +    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
   47.35 +{
   47.36 +    unsigned int guest_bytes, copy_bytes, i;
   47.37 +    uint8_t zero = 0;
   47.38 +
   47.39 +    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
   47.40 +        return;
   47.41 +
   47.42 +    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
   47.43 +    copy_bytes  = min_t(unsigned int, guest_bytes, (NR_CPUS + 7) / 8);
   47.44 +
   47.45 +    copy_to_guest(xenctl_cpumap->bitmap,
   47.46 +                  (uint8_t *)cpus_addr(*cpumask),
   47.47 +                  copy_bytes);
   47.48 +
   47.49 +    for ( i = copy_bytes; i < guest_bytes; i++ )
   47.50 +        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
   47.51 +}
   47.52 +
   47.53 +void xenctl_cpumap_to_cpumask(
   47.54 +    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
   47.55 +{
   47.56 +    unsigned int guest_bytes, copy_bytes;
   47.57 +
   47.58 +    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
   47.59 +    copy_bytes  = min_t(unsigned int, guest_bytes, (NR_CPUS + 7) / 8);
   47.60 +
   47.61 +    cpus_clear(*cpumask);
   47.62 +
   47.63 +    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
   47.64 +        return;
   47.65 +
   47.66 +    copy_from_guest((uint8_t *)cpus_addr(*cpumask),
   47.67 +                    xenctl_cpumap->bitmap,
   47.68 +                    copy_bytes);
   47.69 +}
   47.70 +
   47.71 +static inline int is_free_domid(domid_t dom)
   47.72 +{
   47.73 +    struct domain *d;
   47.74 +
   47.75 +    if ( dom >= DOMID_FIRST_RESERVED )
   47.76 +        return 0;
   47.77 +
   47.78 +    if ( (d = find_domain_by_id(dom)) == NULL )
   47.79 +        return 1;
   47.80 +
   47.81 +    put_domain(d);
   47.82 +    return 0;
   47.83 +}
   47.84 +
   47.85 +void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
   47.86 +{
   47.87 +    struct vcpu   *v;
   47.88 +    u64 cpu_time = 0;
   47.89 +    int flags = DOMFLAGS_BLOCKED;
   47.90 +    struct vcpu_runstate_info runstate;
   47.91 +    
   47.92 +    info->domain = d->domain_id;
   47.93 +    info->nr_online_vcpus = 0;
   47.94 +    
   47.95 +    /* 
   47.96 +     * - domain is marked as blocked only if all its vcpus are blocked
   47.97 +     * - domain is marked as running if any of its vcpus is running
   47.98 +     */
   47.99 +    for_each_vcpu ( d, v ) {
  47.100 +        vcpu_runstate_get(v, &runstate);
  47.101 +        cpu_time += runstate.time[RUNSTATE_running];
  47.102 +        info->max_vcpu_id = v->vcpu_id;
  47.103 +        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
  47.104 +        {
  47.105 +            if ( !(v->vcpu_flags & VCPUF_blocked) )
  47.106 +                flags &= ~DOMFLAGS_BLOCKED;
  47.107 +            if ( v->vcpu_flags & VCPUF_running )
  47.108 +                flags |= DOMFLAGS_RUNNING;
  47.109 +            info->nr_online_vcpus++;
  47.110 +        }
  47.111 +    }
  47.112 +    
  47.113 +    info->cpu_time = cpu_time;
  47.114 +    
  47.115 +    info->flags = flags |
  47.116 +        ((d->domain_flags & DOMF_dying)      ? DOMFLAGS_DYING    : 0) |
  47.117 +        ((d->domain_flags & DOMF_shutdown)   ? DOMFLAGS_SHUTDOWN : 0) |
  47.118 +        ((d->domain_flags & DOMF_ctrl_pause) ? DOMFLAGS_PAUSED   : 0) |
  47.119 +        d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
  47.120 +
  47.121 +    if (d->ssid != NULL)
  47.122 +        info->ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
  47.123 +    else    
  47.124 +        info->ssidref = ACM_DEFAULT_SSID;
  47.125 +    
  47.126 +    info->tot_pages         = d->tot_pages;
  47.127 +    info->max_pages         = d->max_pages;
  47.128 +    info->shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT;
  47.129 +
  47.130 +    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
  47.131 +}
  47.132 +
  47.133 +static unsigned int default_vcpu0_location(void)
  47.134 +{
  47.135 +    struct domain *d;
  47.136 +    struct vcpu   *v;
  47.137 +    unsigned int   i, cpu, cnt[NR_CPUS] = { 0 };
  47.138 +    cpumask_t      cpu_exclude_map;
  47.139 +
  47.140 +    /* Do an initial CPU placement. Pick the least-populated CPU. */
  47.141 +    read_lock(&domlist_lock);
  47.142 +    for_each_domain ( d )
  47.143 +        for_each_vcpu ( d, v )
  47.144 +        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
  47.145 +            cnt[v->processor]++;
  47.146 +    read_unlock(&domlist_lock);
  47.147 +
  47.148 +    /*
  47.149 +     * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
  47.150 +     * favour high numbered CPUs in the event of a tie.
  47.151 +     */
  47.152 +    cpu = first_cpu(cpu_sibling_map[0]);
  47.153 +    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
  47.154 +        cpu = next_cpu(cpu, cpu_sibling_map[0]);
  47.155 +    cpu_exclude_map = cpu_sibling_map[0];
  47.156 +    for_each_online_cpu ( i )
  47.157 +    {
  47.158 +        if ( cpu_isset(i, cpu_exclude_map) )
  47.159 +            continue;
  47.160 +        if ( (i == first_cpu(cpu_sibling_map[i])) &&
  47.161 +             (cpus_weight(cpu_sibling_map[i]) > 1) )
  47.162 +            continue;
  47.163 +        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
  47.164 +        if ( cnt[i] <= cnt[cpu] )
  47.165 +            cpu = i;
  47.166 +    }
  47.167 +
  47.168 +    return cpu;
  47.169 +}
  47.170 +
  47.171 +long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
  47.172 +{
  47.173 +    long ret = 0;
  47.174 +    struct xen_domctl curop, *op = &curop;
  47.175 +    void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
  47.176 +    static DEFINE_SPINLOCK(domctl_lock);
  47.177 +
  47.178 +    if ( !IS_PRIV(current->domain) )
  47.179 +        return -EPERM;
  47.180 +
  47.181 +    if ( copy_from_guest(op, u_domctl, 1) )
  47.182 +        return -EFAULT;
  47.183 +
  47.184 +    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
  47.185 +        return -EACCES;
  47.186 +
  47.187 +    if ( acm_pre_domctl(op, &ssid) )
  47.188 +        return -EPERM;
  47.189 +
  47.190 +    spin_lock(&domctl_lock);
  47.191 +
  47.192 +    switch ( op->cmd )
  47.193 +    {
  47.194 +
  47.195 +    case XEN_DOMCTL_setvcpucontext:
  47.196 +    {
  47.197 +        struct domain *d = find_domain_by_id(op->domain);
  47.198 +        ret = -ESRCH;
  47.199 +        if ( d != NULL )
  47.200 +        {
  47.201 +            ret = set_info_guest(d, &op->u.vcpucontext);
  47.202 +            put_domain(d);
  47.203 +        }
  47.204 +    }
  47.205 +    break;
  47.206 +
  47.207 +    case XEN_DOMCTL_pausedomain:
  47.208 +    {
  47.209 +        struct domain *d = find_domain_by_id(op->domain);
  47.210 +        ret = -ESRCH;
  47.211 +        if ( d != NULL )
  47.212 +        {
  47.213 +            ret = -EINVAL;
  47.214 +            if ( d != current->domain )
  47.215 +            {
  47.216 +                domain_pause_by_systemcontroller(d);
  47.217 +                ret = 0;
  47.218 +            }
  47.219 +            put_domain(d);
  47.220 +        }
  47.221 +    }
  47.222 +    break;
  47.223 +
  47.224 +    case XEN_DOMCTL_unpausedomain:
  47.225 +    {
  47.226 +        struct domain *d = find_domain_by_id(op->domain);
  47.227 +        ret = -ESRCH;
  47.228 +        if ( d != NULL )
  47.229 +        {
  47.230 +            ret = -EINVAL;
  47.231 +            if ( (d != current->domain) && (d->vcpu[0] != NULL) &&
  47.232 +                 test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
  47.233 +            {
  47.234 +                domain_unpause_by_systemcontroller(d);
  47.235 +                ret = 0;
  47.236 +            }
  47.237 +            put_domain(d);
  47.238 +        }
  47.239 +    }
  47.240 +    break;
  47.241 +
  47.242 +    case XEN_DOMCTL_createdomain:
  47.243 +    {
  47.244 +        struct domain *d;
  47.245 +        domid_t        dom;
  47.246 +        static domid_t rover = 0;
  47.247 +
  47.248 +        /*
  47.249 +         * Running the domain 0 kernel in ring 0 is not compatible
  47.250 +         * with multiple guests.
  47.251 +         */
  47.252 +        if ( supervisor_mode_kernel )
   47.253 +            { ret = -EINVAL; break; } /* don't return with domctl_lock held */
  47.254 +
  47.255 +        dom = op->domain;
  47.256 +        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
  47.257 +        {
  47.258 +            ret = -EINVAL;
  47.259 +            if ( !is_free_domid(dom) )
  47.260 +                break;
  47.261 +        }
  47.262 +        else
  47.263 +        {
  47.264 +            for ( dom = rover + 1; dom != rover; dom++ )
  47.265 +            {
  47.266 +                if ( dom == DOMID_FIRST_RESERVED )
  47.267 +                    dom = 0;
  47.268 +                if ( is_free_domid(dom) )
  47.269 +                    break;
  47.270 +            }
  47.271 +
  47.272 +            ret = -ENOMEM;
  47.273 +            if ( dom == rover )
  47.274 +                break;
  47.275 +
  47.276 +            rover = dom;
  47.277 +        }
  47.278 +
  47.279 +        ret = -ENOMEM;
  47.280 +        if ( (d = domain_create(dom)) == NULL )
  47.281 +            break;
  47.282 +
  47.283 +        memcpy(d->handle, op->u.createdomain.handle,
  47.284 +               sizeof(xen_domain_handle_t));
  47.285 +
  47.286 +        ret = 0;
  47.287 +
  47.288 +        op->domain = d->domain_id;
  47.289 +        if ( copy_to_guest(u_domctl, op, 1) )
  47.290 +            ret = -EFAULT;
  47.291 +    }
  47.292 +    break;
  47.293 +
  47.294 +    case XEN_DOMCTL_max_vcpus:
  47.295 +    {
  47.296 +        struct domain *d;
  47.297 +        unsigned int i, max = op->u.max_vcpus.max, cpu;
  47.298 +
  47.299 +        ret = -EINVAL;
  47.300 +        if ( max > MAX_VIRT_CPUS )
  47.301 +            break;
  47.302 +
  47.303 +        ret = -ESRCH;
  47.304 +        if ( (d = find_domain_by_id(op->domain)) == NULL )
  47.305 +            break;
  47.306 +
  47.307 +        /* Needed, for example, to ensure writable p.t. state is synced. */
  47.308 +        domain_pause(d);
  47.309 +
  47.310 +        /* We cannot reduce maximum VCPUs. */
  47.311 +        ret = -EINVAL;
  47.312 +        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
  47.313 +            goto maxvcpu_out;
  47.314 +
  47.315 +        ret = -ENOMEM;
  47.316 +        for ( i = 0; i < max; i++ )
  47.317 +        {
  47.318 +            if ( d->vcpu[i] != NULL )
  47.319 +                continue;
  47.320 +
  47.321 +            cpu = (i == 0) ?
  47.322 +                default_vcpu0_location() :
  47.323 +                (d->vcpu[i-1]->processor + 1) % num_online_cpus();
  47.324 +
  47.325 +            if ( alloc_vcpu(d, i, cpu) == NULL )
  47.326 +                goto maxvcpu_out;
  47.327 +        }
  47.328 +
  47.329 +        ret = 0;
  47.330 +
  47.331 +    maxvcpu_out:
  47.332 +        domain_unpause(d);
  47.333 +        put_domain(d);
  47.334 +    }
  47.335 +    break;
  47.336 +
  47.337 +    case XEN_DOMCTL_destroydomain:
  47.338 +    {
  47.339 +        struct domain *d = find_domain_by_id(op->domain);
  47.340 +        ret = -ESRCH;
  47.341 +        if ( d != NULL )
  47.342 +        {
  47.343 +            ret = -EINVAL;
  47.344 +            if ( d != current->domain )
  47.345 +            {
  47.346 +                domain_kill(d);
  47.347 +                ret = 0;
  47.348 +            }
  47.349 +            put_domain(d);
  47.350 +        }
  47.351 +    }
  47.352 +    break;
  47.353 +
  47.354 +    case XEN_DOMCTL_setvcpuaffinity:
  47.355 +    case XEN_DOMCTL_getvcpuaffinity:
  47.356 +    {
  47.357 +        domid_t dom = op->domain;
  47.358 +        struct domain *d = find_domain_by_id(dom);
  47.359 +        struct vcpu *v;
  47.360 +        cpumask_t new_affinity;
  47.361 +
  47.362 +        if ( d == NULL )
  47.363 +        {
  47.364 +            ret = -ESRCH;            
  47.365 +            break;
  47.366 +        }
  47.367 +        
  47.368 +        if ( (op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS) ||
  47.369 +             !d->vcpu[op->u.vcpuaffinity.vcpu] )
  47.370 +        {
  47.371 +            ret = -EINVAL;
  47.372 +            put_domain(d);
  47.373 +            break;
  47.374 +        }
  47.375 +
  47.376 +        v = d->vcpu[op->u.vcpuaffinity.vcpu];
  47.377 +        if ( v == NULL )
  47.378 +        {
  47.379 +            ret = -ESRCH;
  47.380 +            put_domain(d);
  47.381 +            break;
  47.382 +        }
  47.383 +
  47.384 +        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
  47.385 +        {
  47.386 +            if ( v == current )
  47.387 +            {
  47.388 +                ret = -EINVAL;
  47.389 +                put_domain(d);
  47.390 +                break;
  47.391 +            }
  47.392 +
  47.393 +            xenctl_cpumap_to_cpumask(
  47.394 +                &new_affinity, &op->u.vcpuaffinity.cpumap);
  47.395 +            ret = vcpu_set_affinity(v, &new_affinity);
  47.396 +        }
  47.397 +        else
  47.398 +        {
  47.399 +            cpumask_to_xenctl_cpumap(
  47.400 +                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
  47.401 +        }
  47.402 +
  47.403 +        put_domain(d);
  47.404 +    }
  47.405 +    break;
  47.406 +
  47.407 +    case XEN_DOMCTL_scheduler_op:
  47.408 +    {
  47.409 +        struct domain *d;
  47.410 +
  47.411 +        ret = -ESRCH;
  47.412 +        if ( (d = find_domain_by_id(op->domain)) == NULL )
  47.413 +            break;
  47.414 +
  47.415 +        ret = sched_adjust(d, &op->u.scheduler_op);
  47.416 +        if ( copy_to_guest(u_domctl, op, 1) )
  47.417 +            ret = -EFAULT;
  47.418 +
  47.419 +        put_domain(d);
  47.420 +    }
  47.421 +    break;
  47.422 +
  47.423 +    case XEN_DOMCTL_getdomaininfo:
  47.424 +    { 
  47.425 +        struct domain *d;
  47.426 +        domid_t dom;
  47.427 +
  47.428 +        dom = op->domain;
  47.429 +        if ( dom == DOMID_SELF )
  47.430 +            dom = current->domain->domain_id;
  47.431 +
  47.432 +        read_lock(&domlist_lock);
  47.433 +
  47.434 +        for_each_domain ( d )
  47.435 +        {
  47.436 +            if ( d->domain_id >= dom )
  47.437 +                break;
  47.438 +        }
  47.439 +
  47.440 +        if ( (d == NULL) || !get_domain(d) )
  47.441 +        {
  47.442 +            read_unlock(&domlist_lock);
  47.443 +            ret = -ESRCH;
  47.444 +            break;
  47.445 +        }
  47.446 +
  47.447 +        read_unlock(&domlist_lock);
  47.448 +
  47.449 +        getdomaininfo(d, &op->u.getdomaininfo);
  47.450 +
  47.451 +        op->domain = op->u.getdomaininfo.domain;
  47.452 +        if ( copy_to_guest(u_domctl, op, 1) )
  47.453 +            ret = -EFAULT;
  47.454 +
  47.455 +        put_domain(d);
  47.456 +    }
  47.457 +    break;
  47.458 +
  47.459 +    case XEN_DOMCTL_getvcpucontext:
  47.460 +    { 
  47.461 +        struct vcpu_guest_context *c;
  47.462 +        struct domain             *d;
  47.463 +        struct vcpu               *v;
  47.464 +
  47.465 +        ret = -ESRCH;
  47.466 +        if ( (d = find_domain_by_id(op->domain)) == NULL )
  47.467 +            break;
  47.468 +
  47.469 +        ret = -EINVAL;
  47.470 +        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
  47.471 +            goto getvcpucontext_out;
  47.472 +
  47.473 +        ret = -ESRCH;
  47.474 +        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
  47.475 +            goto getvcpucontext_out;
  47.476 +
  47.477 +        ret = -ENODATA;
  47.478 +        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
  47.479 +            goto getvcpucontext_out;
  47.480 +
  47.481 +        ret = -ENOMEM;
  47.482 +        if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
  47.483 +            goto getvcpucontext_out;
  47.484 +
  47.485 +        if ( v != current )
  47.486 +            vcpu_pause(v);
  47.487 +
  47.488 +        arch_getdomaininfo_ctxt(v,c);
  47.489 +        ret = 0;
  47.490 +
  47.491 +        if ( v != current )
  47.492 +            vcpu_unpause(v);
  47.493 +
  47.494 +        if ( copy_to_guest(op->u.vcpucontext.ctxt, c, 1) )
  47.495 +            ret = -EFAULT;
  47.496 +
  47.497 +        xfree(c);
  47.498 +
  47.499 +        if ( copy_to_guest(u_domctl, op, 1) )
  47.500 +            ret = -EFAULT;
  47.501 +
  47.502 +    getvcpucontext_out:
  47.503 +        put_domain(d);
  47.504 +    }
  47.505 +    break;
  47.506 +
  47.507 +    case XEN_DOMCTL_getvcpuinfo:
  47.508 +    { 
  47.509 +        struct domain *d;
  47.510 +        struct vcpu   *v;
  47.511 +        struct vcpu_runstate_info runstate;
  47.512 +
  47.513 +        ret = -ESRCH;
  47.514 +        if ( (d = find_domain_by_id(op->domain)) == NULL )
  47.515 +            break;
  47.516 +
  47.517 +        ret = -EINVAL;
  47.518 +        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
  47.519 +            goto getvcpuinfo_out;
  47.520 +
  47.521 +        ret = -ESRCH;
  47.522 +        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
  47.523 +            goto getvcpuinfo_out;
  47.524 +
  47.525 +        vcpu_runstate_get(v, &runstate);
  47.526 +
  47.527 +        op->u.getvcpuinfo.online   = !test_bit(_VCPUF_down, &v->vcpu_flags);
  47.528 +        op->u.getvcpuinfo.blocked  = test_bit(_VCPUF_blocked, &v->vcpu_flags);
  47.529 +        op->u.getvcpuinfo.running  = test_bit(_VCPUF_running, &v->vcpu_flags);
  47.530 +        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
  47.531 +        op->u.getvcpuinfo.cpu      = v->processor;
  47.532 +        ret = 0;
  47.533 +
  47.534 +        if ( copy_to_guest(u_domctl, op, 1) )
  47.535 +            ret = -EFAULT;
  47.536 +
  47.537 +    getvcpuinfo_out:
  47.538 +        put_domain(d);
  47.539 +    }
  47.540 +    break;
  47.541 +
  47.542 +    case XEN_DOMCTL_max_mem:
  47.543 +    {
  47.544 +        struct domain *d;
  47.545 +        unsigned long new_max;
  47.546 +
  47.547 +        ret = -ESRCH;
  47.548 +        d = find_domain_by_id(op->domain);
  47.549 +        if ( d == NULL )
  47.550 +            break;
  47.551 +
  47.552 +        ret = -EINVAL;
  47.553 +        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);
  47.554 +
  47.555 +        spin_lock(&d->page_alloc_lock);
  47.556 +        if ( new_max >= d->tot_pages )
  47.557 +        {
  47.558 +            d->max_pages = new_max;
  47.559 +            ret = 0;
  47.560 +        }
  47.561 +        spin_unlock(&d->page_alloc_lock);
  47.562 +
  47.563 +        put_domain(d);
  47.564 +    }
  47.565 +    break;
  47.566 +
  47.567 +    case XEN_DOMCTL_setdomainhandle:
  47.568 +    {
  47.569 +        struct domain *d;
  47.570 +        ret = -ESRCH;
  47.571 +        d = find_domain_by_id(op->domain);
  47.572 +        if ( d != NULL )
  47.573 +        {
  47.574 +            memcpy(d->handle, op->u.setdomainhandle.handle,
  47.575 +                   sizeof(xen_domain_handle_t));
  47.576 +            put_domain(d);
  47.577 +            ret = 0;
  47.578 +        }
  47.579 +    }
  47.580 +    break;
  47.581 +
  47.582 +    case XEN_DOMCTL_setdebugging:
  47.583 +    {
  47.584 +        struct domain *d;
  47.585 +        ret = -ESRCH;
  47.586 +        d = find_domain_by_id(op->domain);
  47.587 +        if ( d != NULL )
  47.588 +        {
  47.589 +            if ( op->u.setdebugging.enable )
  47.590 +                set_bit(_DOMF_debugging, &d->domain_flags);
  47.591 +            else
  47.592 +                clear_bit(_DOMF_debugging, &d->domain_flags);
  47.593 +            put_domain(d);
  47.594 +            ret = 0;
  47.595 +        }
  47.596 +    }
  47.597 +    break;
  47.598 +
  47.599 +    case XEN_DOMCTL_irq_permission:
  47.600 +    {
  47.601 +        struct domain *d;
  47.602 +        unsigned int pirq = op->u.irq_permission.pirq;
  47.603 +
  47.604 +        ret = -EINVAL;
  47.605 +        if ( pirq >= NR_IRQS )
  47.606 +            break;
  47.607 +
  47.608 +        ret = -ESRCH;
  47.609 +        d = find_domain_by_id(op->domain);
  47.610 +        if ( d == NULL )
  47.611 +            break;
  47.612 +
  47.613 +        if ( op->u.irq_permission.allow_access )
  47.614 +            ret = irq_permit_access(d, pirq);
  47.615 +        else
  47.616 +            ret = irq_deny_access(d, pirq);
  47.617 +
  47.618 +        put_domain(d);
  47.619 +    }
  47.620 +    break;
  47.621 +
  47.622 +    case XEN_DOMCTL_iomem_permission:
  47.623 +    {
  47.624 +        struct domain *d;
  47.625 +        unsigned long mfn = op->u.iomem_permission.first_mfn;
  47.626 +        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;
  47.627 +
  47.628 +        ret = -EINVAL;
  47.629 +        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
  47.630 +            break;
  47.631 +
  47.632 +        ret = -ESRCH;
  47.633 +        d = find_domain_by_id(op->domain);
  47.634 +        if ( d == NULL )
  47.635 +            break;
  47.636 +
  47.637 +        if ( op->u.iomem_permission.allow_access )
  47.638 +            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
  47.639 +        else
  47.640 +            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
  47.641 +
  47.642 +        put_domain(d);
  47.643 +    }
  47.644 +    break;
  47.645 +
  47.646 +    case XEN_DOMCTL_settimeoffset:
  47.647 +    {
  47.648 +        struct domain *d;
  47.649 +
  47.650 +        ret = -ESRCH;
  47.651 +        d = find_domain_by_id(op->domain);
  47.652 +        if ( d != NULL )
  47.653 +        {
  47.654 +            d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds;
  47.655 +            put_domain(d);
  47.656 +            ret = 0;
  47.657 +        }
  47.658 +    }
  47.659 +    break;
  47.660 +
  47.661 +    default:
  47.662 +        ret = arch_do_domctl(op, u_domctl);
  47.663 +        break;
  47.664 +    }
  47.665 +
  47.666 +    spin_unlock(&domctl_lock);
  47.667 +
  47.668 +    if ( ret == 0 )
  47.669 +        acm_post_domctl(op, &ssid);
  47.670 +    else
  47.671 +        acm_fail_domctl(op, &ssid);
  47.672 +
  47.673 +    return ret;
  47.674 +}
  47.675 +
  47.676 +/*
  47.677 + * Local variables:
  47.678 + * mode: C
  47.679 + * c-set-style: "BSD"
  47.680 + * c-basic-offset: 4
  47.681 + * tab-width: 4
  47.682 + * indent-tabs-mode: nil
  47.683 + * End:
  47.684 + */
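
An illustrative tool-side sketch of how the new do_domctl() entry point above is intended to be driven: build a struct xen_domctl, stamp it with XEN_DOMCTL_INTERFACE_VERSION, and hand it to a wrapper that issues the domctl hypercall. The do_domctl()/xc_handle helper names and the header path below are assumptions for illustration, not part of this changeset.

    /* Hypothetical caller: pause a domain through XEN_DOMCTL_pausedomain. */
    #include <string.h>
    #include <xen/domctl.h>   /* assumed install path of public/domctl.h */

    static int example_pause_domain(int xc_handle, uint32_t domid)
    {
        struct xen_domctl domctl;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd               = XEN_DOMCTL_pausedomain;
        domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        domctl.domain            = (domid_t)domid;

        return do_domctl(xc_handle, &domctl);  /* assumed libxc-style wrapper */
    }

As in do_domctl() above, the hypervisor rejects the call with -EPERM unless the caller is privileged and with -EACCES if interface_version does not match.
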
    48.1 --- a/xen/common/perfc.c	Fri Aug 25 10:39:24 2006 +0100
    48.2 +++ b/xen/common/perfc.c	Fri Aug 25 18:39:10 2006 +0100
    48.3 @@ -7,7 +7,6 @@
    48.4  #include <xen/spinlock.h>
    48.5  #include <xen/mm.h>
    48.6  #include <xen/guest_access.h>
    48.7 -#include <public/dom0_ops.h>
    48.8  #include <asm/perfc.h>
    48.9  
   48.10  #undef  PERFCOUNTER
   48.11 @@ -218,20 +217,19 @@ static int perfc_copy_info(XEN_GUEST_HAN
   48.12  int perfc_control(dom0_perfccontrol_t *pc)
   48.13  {
   48.14      static DEFINE_SPINLOCK(lock);
   48.15 -    u32 op = pc->op;
   48.16      int rc;
   48.17  
   48.18      spin_lock(&lock);
   48.19  
   48.20 -    switch ( op )
   48.21 +    switch ( pc->cmd )
   48.22      {
   48.23 -    case DOM0_PERFCCONTROL_OP_RESET:
   48.24 +    case XEN_SYSCTL_PERFCOP_reset:
   48.25          perfc_copy_info(pc->desc, pc->val);
   48.26          perfc_reset(0);
   48.27          rc = 0;
   48.28          break;
   48.29  
   48.30 -    case DOM0_PERFCCONTROL_OP_QUERY:
   48.31 +    case XEN_SYSCTL_PERFCOP_query:
   48.32          perfc_copy_info(pc->desc, pc->val);
   48.33          rc = 0;
   48.34          break;
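
The perfc_control() dispatcher above now keys off the sysctl command values. A rough tool-side sketch of reaching it through the sysctl hypercall follows; the XEN_SYSCTL_perfc_op sub-op name, the u.perfc_op union member and the do_sysctl()/xc_handle wrapper are assumptions for illustration, not taken from this changeset.

    /* Hypothetical caller: reset Xen's performance counters via sysctl. */
    #include <string.h>

    static int example_perfc_reset(int xc_handle)
    {
        struct xen_sysctl sysctl;                    /* assumed public sysctl type */

        memset(&sysctl, 0, sizeof(sysctl));
        sysctl.cmd               = XEN_SYSCTL_perfc_op;           /* assumed name */
        sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;  /* assumed name */
        sysctl.u.perfc_op.cmd    = XEN_SYSCTL_PERFCOP_reset;

        return do_sysctl(xc_handle, &sysctl);   /* assumed libxc-style wrapper */
    }
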
    49.1 --- a/xen/common/sched_credit.c	Fri Aug 25 10:39:24 2006 +0100
    49.2 +++ b/xen/common/sched_credit.c	Fri Aug 25 18:39:10 2006 +0100
    49.3 @@ -614,34 +614,34 @@ csched_vcpu_set_affinity(struct vcpu *vc
    49.4  static int
    49.5  csched_dom_cntl(
    49.6      struct domain *d,
    49.7 -    struct sched_adjdom_cmd *cmd)
    49.8 +    struct xen_domctl_scheduler_op *op)
    49.9  {
   49.10      struct csched_dom * const sdom = CSCHED_DOM(d);
   49.11      unsigned long flags;
   49.12  
   49.13 -    if ( cmd->direction == SCHED_INFO_GET )
    49.14 +    if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
   49.15      {
   49.16 -        cmd->u.credit.weight = sdom->weight;
   49.17 -        cmd->u.credit.cap = sdom->cap;
   49.18 +        op->u.credit.weight = sdom->weight;
   49.19 +        op->u.credit.cap = sdom->cap;
   49.20      }
   49.21      else
   49.22      {
   49.23 -        ASSERT( cmd->direction == SCHED_INFO_PUT );
    49.24 +        ASSERT(op->cmd == XEN_DOMCTL_SCHEDOP_putinfo);
   49.25  
   49.26          spin_lock_irqsave(&csched_priv.lock, flags);
   49.27  
   49.28 -        if ( cmd->u.credit.weight != 0 )
   49.29 +        if ( op->u.credit.weight != 0 )
   49.30          {
   49.31              if ( !list_empty(&sdom->active_sdom_elem) )
   49.32              {
   49.33                  csched_priv.weight -= sdom->weight;
   49.34 -                csched_priv.weight += cmd->u.credit.weight;
   49.35 +                csched_priv.weight += op->u.credit.weight;
   49.36              }
   49.37 -            sdom->weight = cmd->u.credit.weight;
   49.38 +            sdom->weight = op->u.credit.weight;
   49.39          }
   49.40  
   49.41 -        if ( cmd->u.credit.cap != (uint16_t)~0U )
   49.42 -            sdom->cap = cmd->u.credit.cap;
   49.43 +        if ( op->u.credit.cap != (uint16_t)~0U )
   49.44 +            sdom->cap = op->u.credit.cap;
   49.45  
   49.46          spin_unlock_irqrestore(&csched_priv.lock, flags);
   49.47      }
   49.48 @@ -1215,7 +1215,7 @@ csched_init(void)
   49.49  struct scheduler sched_credit_def = {
   49.50      .name           = "SMP Credit Scheduler",
   49.51      .opt_name       = "credit",
   49.52 -    .sched_id       = SCHED_CREDIT,
   49.53 +    .sched_id       = XEN_SCHEDULER_CREDIT,
   49.54  
   49.55      .init_vcpu      = csched_vcpu_init,
   49.56      .destroy_domain = csched_dom_destroy,
   49.57 @@ -1225,7 +1225,7 @@ struct scheduler sched_credit_def = {
   49.58  
   49.59      .set_affinity   = csched_vcpu_set_affinity,
   49.60  
   49.61 -    .adjdom         = csched_dom_cntl,
   49.62 +    .adjust         = csched_dom_cntl,
   49.63  
   49.64      .tick           = csched_tick,
   49.65      .do_schedule    = csched_schedule,
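
With the hunk above, per-domain credit parameters are set and queried through XEN_DOMCTL_scheduler_op rather than the old adjdom call. A hedged caller-side sketch follows; do_domctl() and xc_handle are assumptions, as in the earlier sketch.

    /* Hypothetical caller: set a domain's credit-scheduler weight and cap. */
    #include <string.h>

    static int example_set_credit(int xc_handle, uint32_t domid,
                                  uint16_t weight, uint16_t cap)
    {
        struct xen_domctl domctl;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd                            = XEN_DOMCTL_scheduler_op;
        domctl.interface_version              = XEN_DOMCTL_INTERFACE_VERSION;
        domctl.domain                         = (domid_t)domid;
        domctl.u.scheduler_op.sched_id        = XEN_SCHEDULER_CREDIT;
        domctl.u.scheduler_op.cmd             = XEN_DOMCTL_SCHEDOP_putinfo;
        domctl.u.scheduler_op.u.credit.weight = weight;
        domctl.u.scheduler_op.u.credit.cap    = cap;

        return do_domctl(xc_handle, &domctl);
    }

Per csched_dom_cntl() above, a weight of 0 and a cap of (uint16_t)~0 mean "leave that field unchanged".
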
    50.1 --- a/xen/common/sched_sedf.c	Fri Aug 25 10:39:24 2006 +0100
    50.2 +++ b/xen/common/sched_sedf.c	Fri Aug 25 18:39:10 2006 +0100
    50.3 @@ -8,7 +8,6 @@
    50.4  #include <xen/lib.h>
    50.5  #include <xen/sched.h>
    50.6  #include <xen/sched-if.h>
    50.7 -#include <public/sched_ctl.h>
    50.8  #include <xen/timer.h>
    50.9  #include <xen/softirq.h>
   50.10  #include <xen/time.h>
   50.11 @@ -1297,7 +1296,7 @@ static void sedf_dump_cpu_state(int i)
   50.12  
   50.13  
   50.14  /* Adjusts periods and slices of the domains accordingly to their weights. */
   50.15 -static int sedf_adjust_weights(struct sched_adjdom_cmd *cmd)
   50.16 +static int sedf_adjust_weights(struct xen_domctl_scheduler_op *cmd)
   50.17  {
   50.18      struct vcpu *p;
   50.19      struct domain      *d;
   50.20 @@ -1352,29 +1351,29 @@ static int sedf_adjust_weights(struct sc
   50.21  
   50.22  
   50.23  /* set or fetch domain scheduling parameters */
   50.24 -static int sedf_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd)
   50.25 +static int sedf_adjust(struct domain *p, struct xen_domctl_scheduler_op *op)
   50.26  {
   50.27      struct vcpu *v;
   50.28  
   50.29 -    PRINT(2,"sedf_adjdom was called, domain-id %i new period %"PRIu64" "
   50.30 +    PRINT(2,"sedf_adjust was called, domain-id %i new period %"PRIu64" "
   50.31            "new slice %"PRIu64"\nlatency %"PRIu64" extra:%s\n",
   50.32 -          p->domain_id, cmd->u.sedf.period, cmd->u.sedf.slice,
   50.33 -          cmd->u.sedf.latency, (cmd->u.sedf.extratime)?"yes":"no");
   50.34 +          p->domain_id, op->u.sedf.period, op->u.sedf.slice,
   50.35 +          op->u.sedf.latency, (op->u.sedf.extratime)?"yes":"no");
   50.36  
   50.37 -    if ( cmd->direction == SCHED_INFO_PUT )
   50.38 +    if ( op->cmd == XEN_DOMCTL_SCHEDOP_putinfo )
   50.39      {
   50.40          /* Check for sane parameters. */
   50.41 -        if ( !cmd->u.sedf.period && !cmd->u.sedf.weight )
   50.42 +        if ( !op->u.sedf.period && !op->u.sedf.weight )
   50.43              return -EINVAL;
   50.44 -        if ( cmd->u.sedf.weight )
   50.45 +        if ( op->u.sedf.weight )
   50.46          {
   50.47 -            if ( (cmd->u.sedf.extratime & EXTRA_AWARE) &&
   50.48 -                 (!cmd->u.sedf.period) )
   50.49 +            if ( (op->u.sedf.extratime & EXTRA_AWARE) &&
   50.50 +                 (!op->u.sedf.period) )
   50.51              {
   50.52                  /* Weight-driven domains with extratime only. */
   50.53                  for_each_vcpu ( p, v )
   50.54                  {
   50.55 -                    EDOM_INFO(v)->extraweight = cmd->u.sedf.weight;
   50.56 +                    EDOM_INFO(v)->extraweight = op->u.sedf.weight;
   50.57                      EDOM_INFO(v)->weight = 0;
   50.58                      EDOM_INFO(v)->slice = 0;
   50.59                      EDOM_INFO(v)->period = WEIGHT_PERIOD;
   50.60 @@ -1384,7 +1383,7 @@ static int sedf_adjdom(struct domain *p,
   50.61              {
   50.62                  /* Weight-driven domains with real-time execution. */
   50.63                  for_each_vcpu ( p, v )
   50.64 -                    EDOM_INFO(v)->weight = cmd->u.sedf.weight;
   50.65 +                    EDOM_INFO(v)->weight = op->u.sedf.weight;
   50.66              }
   50.67          }
   50.68          else
   50.69 @@ -1396,51 +1395,51 @@ static int sedf_adjdom(struct domain *p,
   50.70                   * Sanity checking: note that disabling extra weight requires
   50.71                   * that we set a non-zero slice.
   50.72                   */
   50.73 -                if ( (cmd->u.sedf.period > PERIOD_MAX) ||
   50.74 -                     (cmd->u.sedf.period < PERIOD_MIN) ||
   50.75 -                     (cmd->u.sedf.slice  > cmd->u.sedf.period) ||
   50.76 -                     (cmd->u.sedf.slice  < SLICE_MIN) )
   50.77 +                if ( (op->u.sedf.period > PERIOD_MAX) ||
   50.78 +                     (op->u.sedf.period < PERIOD_MIN) ||
   50.79 +                     (op->u.sedf.slice  > op->u.sedf.period) ||
   50.80 +                     (op->u.sedf.slice  < SLICE_MIN) )
   50.81                      return -EINVAL;
   50.82                  EDOM_INFO(v)->weight = 0;
   50.83                  EDOM_INFO(v)->extraweight = 0;
   50.84                  EDOM_INFO(v)->period_orig = 
   50.85 -                    EDOM_INFO(v)->period  = cmd->u.sedf.period;
   50.86 +                    EDOM_INFO(v)->period  = op->u.sedf.period;
   50.87                  EDOM_INFO(v)->slice_orig  = 
   50.88 -                    EDOM_INFO(v)->slice   = cmd->u.sedf.slice;
   50.89 +                    EDOM_INFO(v)->slice   = op->u.sedf.slice;
   50.90              }
   50.91          }
   50.92  
   50.93 -        if ( sedf_adjust_weights(cmd) )
   50.94 +        if ( sedf_adjust_weights(op) )
   50.95              return -EINVAL;
   50.96  
   50.97          for_each_vcpu ( p, v )
   50.98          {
   50.99              EDOM_INFO(v)->status  = 
  50.100                  (EDOM_INFO(v)->status &
  50.101 -                 ~EXTRA_AWARE) | (cmd->u.sedf.extratime & EXTRA_AWARE);
  50.102 -            EDOM_INFO(v)->latency = cmd->u.sedf.latency;
  50.103 +                 ~EXTRA_AWARE) | (op->u.sedf.extratime & EXTRA_AWARE);
  50.104 +            EDOM_INFO(v)->latency = op->u.sedf.latency;
  50.105              extraq_check(v);
  50.106          }
  50.107      }
  50.108 -    else if ( cmd->direction == SCHED_INFO_GET )
  50.109 +    else if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
  50.110      {
  50.111          if ( p->vcpu[0] == NULL )
  50.112              return -EINVAL;
  50.113 -        cmd->u.sedf.period    = EDOM_INFO(p->vcpu[0])->period;
  50.114 -        cmd->u.sedf.slice     = EDOM_INFO(p->vcpu[0])->slice;
  50.115 -        cmd->u.sedf.extratime = EDOM_INFO(p->vcpu[0])->status & EXTRA_AWARE;
  50.116 -        cmd->u.sedf.latency   = EDOM_INFO(p->vcpu[0])->latency;
  50.117 -        cmd->u.sedf.weight    = EDOM_INFO(p->vcpu[0])->weight;
  50.118 +        op->u.sedf.period    = EDOM_INFO(p->vcpu[0])->period;
  50.119 +        op->u.sedf.slice     = EDOM_INFO(p->vcpu[0])->slice;
  50.120 +        op->u.sedf.extratime = EDOM_INFO(p->vcpu[0])->status & EXTRA_AWARE;
  50.121 +        op->u.sedf.latency   = EDOM_INFO(p->vcpu[0])->latency;
  50.122 +        op->u.sedf.weight    = EDOM_INFO(p->vcpu[0])->weight;
  50.123      }
  50.124  
  50.125 -    PRINT(2,"sedf_adjdom_finished\n");
  50.126 +    PRINT(2,"sedf_adjust_finished\n");
  50.127      return 0;
  50.128  }
  50.129  
  50.130  struct scheduler sched_sedf_def = {
  50.131      .name     = "Simple EDF Scheduler",
  50.132      .opt_name = "sedf",
  50.133 -    .sched_id = SCHED_SEDF,
  50.134 +    .sched_id = XEN_SCHEDULER_SEDF,
  50.135      
  50.136      .init_vcpu      = sedf_init_vcpu,
  50.137      .destroy_domain = sedf_destroy_domain,
  50.138 @@ -1449,7 +1448,7 @@ struct scheduler sched_sedf_def = {
  50.139      .dump_cpu_state = sedf_dump_cpu_state,
  50.140      .sleep          = sedf_sleep,
  50.141      .wake           = sedf_wake,
  50.142 -    .adjdom         = sedf_adjdom,
  50.143 +    .adjust         = sedf_adjust,
  50.144      .set_affinity   = sedf_set_affinity
  50.145  };
  50.146  
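
A minimal tool-side sketch of the new per-domain scheduler op for SEDF, assuming the xen_domctl_scheduler_op layout implied by the accesses above (sched_id, cmd, and a u.sedf union carrying period, slice, latency, extratime and weight). The helper name is illustrative only, not part of the changeset:

    /* Hypothetical helper: fill a scheduler-op payload for SEDF "putinfo".
     * Field names follow the accesses visible in sedf_adjust() above. */
    static void build_sedf_putinfo(struct xen_domctl_scheduler_op *op,
                                   uint64_t period_ns, uint64_t slice_ns,
                                   uint32_t extratime, uint32_t weight)
    {
        op->sched_id         = XEN_SCHEDULER_SEDF;        /* must match ops.sched_id */
        op->cmd              = XEN_DOMCTL_SCHEDOP_putinfo;
        op->u.sedf.period    = period_ns;                 /* bounded by PERIOD_MIN/MAX */
        op->u.sedf.slice     = slice_ns;                  /* must not exceed the period */
        op->u.sedf.latency   = 0;
        op->u.sedf.extratime = extratime;                 /* EXTRA_AWARE is derived from this */
        op->u.sedf.weight    = weight;                    /* 0 selects explicit period/slice */
    }
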
    51.1 --- a/xen/common/schedule.c	Fri Aug 25 10:39:24 2006 +0100
    51.2 +++ b/xen/common/schedule.c	Fri Aug 25 18:39:10 2006 +0100
    51.3 @@ -30,7 +30,6 @@
    51.4  #include <xen/errno.h>
    51.5  #include <xen/guest_access.h>
    51.6  #include <public/sched.h>
    51.7 -#include <public/sched_ctl.h>
    51.8  
    51.9  extern void arch_getdomaininfo_ctxt(struct vcpu *,
   51.10                                      struct vcpu_guest_context *);
   51.11 @@ -427,32 +426,16 @@ int sched_id(void)
   51.12      return ops.sched_id;
   51.13  }
   51.14  
   51.15 -long sched_ctl(struct sched_ctl_cmd *cmd)
   51.16 +/* Adjust scheduling parameter for a given domain. */
   51.17 +long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
   51.18  {
   51.19 -    if ( cmd->sched_id != ops.sched_id )
   51.20 -        return -EINVAL;
   51.21 -
   51.22 -    SCHED_OP(control, cmd);
   51.23 -    TRACE_0D(TRC_SCHED_CTL);
   51.24 -    return 0;
   51.25 -}
   51.26 -
   51.27 -
   51.28 -/* Adjust scheduling parameter for a given domain. */
   51.29 -long sched_adjdom(struct sched_adjdom_cmd *cmd)
   51.30 -{
   51.31 -    struct domain *d;
   51.32      struct vcpu *v;
   51.33      
   51.34 -    if ( (cmd->sched_id != ops.sched_id) ||
   51.35 -         ((cmd->direction != SCHED_INFO_PUT) &&
   51.36 -          (cmd->direction != SCHED_INFO_GET)) )
   51.37 +    if ( (op->sched_id != ops.sched_id) ||
   51.38 +         ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
   51.39 +          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
   51.40          return -EINVAL;
   51.41  
   51.42 -    d = find_domain_by_id(cmd->domain);
   51.43 -    if ( d == NULL )
   51.44 -        return -ESRCH;
   51.45 -
   51.46      /*
   51.47       * Most VCPUs we can simply pause. If we are adjusting this VCPU then
   51.48       * we acquire the local schedule_lock to guard against concurrent updates.
   51.49 @@ -475,7 +458,7 @@ long sched_adjdom(struct sched_adjdom_cm
   51.50      if ( d == current->domain )
   51.51          vcpu_schedule_lock_irq(current);
   51.52  
   51.53 -    SCHED_OP(adjdom, d, cmd);
   51.54 +    SCHED_OP(adjust, d, op);
   51.55      TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
   51.56  
   51.57      if ( d == current->domain )
   51.58 @@ -487,8 +470,6 @@ long sched_adjdom(struct sched_adjdom_cm
   51.59              vcpu_unpause(v);
   51.60      }
   51.61  
   51.62 -    put_domain(d);
   51.63 -
   51.64      return 0;
   51.65  }
   51.66  
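
With the domain lookup and reference counting removed from sched_adjust(), the domctl dispatcher is now responsible for resolving the target domain first. A rough sketch of that calling pattern, reusing find_domain_by_id()/put_domain() as the deleted code did; the case label and union member names here are assumptions, not quoted from the changeset:

    /* Sketch of the caller side in the domctl dispatcher (not a literal excerpt). */
    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d = find_domain_by_id(op->domain);
        if ( d == NULL )
        {
            ret = -ESRCH;
            break;
        }
        ret = sched_adjust(d, &op->u.scheduler_op);   /* validates sched_id and cmd */
        if ( (ret == 0) && copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
        put_domain(d);
    }
    break;
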
    52.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    52.2 +++ b/xen/common/sysctl.c	Fri Aug 25 18:39:10 2006 +0100
    52.3 @@ -0,0 +1,152 @@
    52.4 +/******************************************************************************
    52.5 + * sysctl.c
    52.6 + * 
    52.7 + * System management operations. For use by node control stack.
    52.8 + * 
    52.9 + * Copyright (c) 2002-2006, K Fraser
   52.10 + */
   52.11 +
   52.12 +#include <xen/config.h>
   52.13 +#include <xen/types.h>
   52.14 +#include <xen/lib.h>
   52.15 +#include <xen/mm.h>
   52.16 +#include <xen/sched.h>
   52.17 +#include <xen/domain.h>
   52.18 +#include <xen/event.h>
   52.19 +#include <xen/domain_page.h>
   52.20 +#include <xen/trace.h>
   52.21 +#include <xen/console.h>
   52.22 +#include <xen/iocap.h>
   52.23 +#include <xen/guest_access.h>
   52.24 +#include <asm/current.h>
   52.25 +#include <public/sysctl.h>
   52.26 +
   52.27 +extern long arch_do_sysctl(
   52.28 +    struct xen_sysctl *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl);
   52.29 +extern void getdomaininfo(
   52.30 +    struct domain *d, struct xen_domctl_getdomaininfo *info);
   52.31 +
   52.32 +long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
   52.33 +{
   52.34 +    long ret = 0;
   52.35 +    struct xen_sysctl curop, *op = &curop;
   52.36 +    static DEFINE_SPINLOCK(sysctl_lock);
   52.37 +
   52.38 +    if ( !IS_PRIV(current->domain) )
   52.39 +        return -EPERM;
   52.40 +
   52.41 +    if ( copy_from_guest(op, u_sysctl, 1) )
   52.42 +        return -EFAULT;
   52.43 +
   52.44 +    if ( op->interface_version != XEN_SYSCTL_INTERFACE_VERSION )
   52.45 +        return -EACCES;
   52.46 +
   52.47 +    spin_lock(&sysctl_lock);
   52.48 +
   52.49 +    switch ( op->cmd )
   52.50 +    {
   52.51 +    case XEN_SYSCTL_readconsole:
   52.52 +    {
   52.53 +        ret = read_console_ring(
   52.54 +            guest_handle_cast(op->u.readconsole.buffer, char),
   52.55 +            &op->u.readconsole.count,
   52.56 +            op->u.readconsole.clear);
   52.57 +        if ( copy_to_guest(u_sysctl, op, 1) )
   52.58 +            ret = -EFAULT;
   52.59 +    }
   52.60 +    break;
   52.61 +
   52.62 +    case XEN_SYSCTL_tbuf_op:
   52.63 +    {
   52.64 +        ret = tb_control(&op->u.tbuf_op);
   52.65 +        if ( copy_to_guest(u_sysctl, op, 1) )
   52.66 +            ret = -EFAULT;
   52.67 +    }
   52.68 +    break;
   52.69 +    
   52.70 +    case XEN_SYSCTL_sched_id:
   52.71 +    {
   52.72 +        op->u.sched_id.sched_id = sched_id();
   52.73 +        if ( copy_to_guest(u_sysctl, op, 1) )
   52.74 +            ret = -EFAULT;
   52.75 +        else
   52.76 +            ret = 0;
   52.77 +    }
   52.78 +    break;
   52.79 +
   52.80 +    case XEN_SYSCTL_getdomaininfolist:
   52.81 +    { 
   52.82 +        struct domain *d;
   52.83 +        struct xen_domctl_getdomaininfo info;
   52.84 +        u32 num_domains = 0;
   52.85 +
   52.86 +        read_lock(&domlist_lock);
   52.87 +
   52.88 +        for_each_domain ( d )
   52.89 +        {
   52.90 +            if ( d->domain_id < op->u.getdomaininfolist.first_domain )
   52.91 +                continue;
   52.92 +            if ( num_domains == op->u.getdomaininfolist.max_domains )
   52.93 +                break;
   52.94 +            if ( (d == NULL) || !get_domain(d) )
   52.95 +            {
   52.96 +                ret = -ESRCH;
   52.97 +                break;
   52.98 +            }
   52.99 +
  52.100 +            getdomaininfo(d, &info);
  52.101 +
  52.102 +            put_domain(d);
  52.103 +
  52.104 +            if ( copy_to_guest_offset(op->u.getdomaininfolist.buffer,
  52.105 +                                      num_domains, &info, 1) )
  52.106 +            {
  52.107 +                ret = -EFAULT;
  52.108 +                break;
  52.109 +            }
  52.110 +            
  52.111 +            num_domains++;
  52.112 +        }
  52.113 +        
  52.114 +        read_unlock(&domlist_lock);
  52.115 +        
  52.116 +        if ( ret != 0 )
  52.117 +            break;
  52.118 +        
  52.119 +        op->u.getdomaininfolist.num_domains = num_domains;
  52.120 +
  52.121 +        if ( copy_to_guest(u_sysctl, op, 1) )
  52.122 +            ret = -EFAULT;
  52.123 +    }
  52.124 +    break;
  52.125 +
  52.126 +#ifdef PERF_COUNTERS
  52.127 +    case XEN_SYSCTL_perfccontrol:
  52.128 +    {
  52.129 +        extern int perfc_control(xen_sysctl_perfccontrol_t *);
  52.130 +        ret = perfc_control(&op->u.perfccontrol);
  52.131 +        if ( copy_to_guest(u_sysctl, op, 1) )
  52.132 +            ret = -EFAULT;
  52.133 +    }
  52.134 +    break;
  52.135 +#endif
  52.136 +
  52.137 +    default:
  52.138 +        ret = arch_do_sysctl(op, u_sysctl);
  52.139 +        break;
  52.140 +    }
  52.141 +
  52.142 +    spin_unlock(&sysctl_lock);
  52.143 +
  52.144 +    return ret;
  52.145 +}
  52.146 +
  52.147 +/*
  52.148 + * Local variables:
  52.149 + * mode: C
  52.150 + * c-set-style: "BSD"
  52.151 + * c-basic-offset: 4
  52.152 + * tab-width: 4
  52.153 + * indent-tabs-mode: nil
  52.154 + * End:
  52.155 + */
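
From the tools side, the new hypercall is reached much like the old dom0_op: marshal a struct xen_sysctl, set the interface version, and issue __HYPERVISOR_sysctl through the privcmd path. A condensed, hypothetical libxenctrl-style wrapper for the XEN_SYSCTL_sched_id case above (the DECLARE_HYPERCALL/do_xen_hypercall plumbing and buffer locking are assumptions here and are elided):

    /* Hypothetical tool-side wrapper; error handling and buffer locking trimmed. */
    static int xc_sched_id_example(int xc_handle, int *sched_id)
    {
        struct xen_sysctl sysctl;
        DECLARE_HYPERCALL;

        sysctl.cmd = XEN_SYSCTL_sched_id;
        sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;

        hypercall.op     = __HYPERVISOR_sysctl;
        hypercall.arg[0] = (unsigned long)&sysctl;

        if ( do_xen_hypercall(xc_handle, &hypercall) != 0 )
            return -1;   /* the hypervisor returns -EPERM for unprivileged callers */

        *sched_id = sysctl.u.sched_id.sched_id;
        return 0;
    }
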
    53.1 --- a/xen/common/trace.c	Fri Aug 25 10:39:24 2006 +0100
    53.2 +++ b/xen/common/trace.c	Fri Aug 25 18:39:10 2006 +0100
    53.3 @@ -14,9 +14,6 @@
    53.4   * The trace buffer code is designed to allow debugging traces of Xen to be
    53.5   * generated on UP / SMP machines.  Each trace entry is timestamped so that
    53.6   * it's possible to reconstruct a chronological record of trace events.
    53.7 - *
    53.8 - * See also include/xen/trace.h and the dom0 op in
    53.9 - * include/public/dom0_ops.h
   53.10   */
   53.11  
   53.12  #include <xen/config.h>
   53.13 @@ -33,7 +30,7 @@
   53.14  #include <xen/mm.h>
   53.15  #include <xen/percpu.h>
   53.16  #include <asm/atomic.h>
   53.17 -#include <public/dom0_ops.h>
   53.18 +#include <public/sysctl.h>
   53.19  
   53.20  /* opt_tbuf_size: trace buffer size (in pages) */
   53.21  static unsigned int opt_tbuf_size = 0;
   53.22 @@ -56,7 +53,7 @@ static DEFINE_PER_CPU(unsigned long, los
   53.23  int tb_init_done;
   53.24  
   53.25  /* which CPUs tracing is enabled on */
   53.26 -static unsigned long tb_cpu_mask = (~0UL);
   53.27 +static cpumask_t tb_cpu_mask = CPU_MASK_ALL;
   53.28  
   53.29  /* which tracing events are enabled */
   53.30  static u32 tb_event_mask = TRC_ALL;
   53.31 @@ -171,43 +168,41 @@ void init_trace_bufs(void)
   53.32      }
   53.33  }
   53.34  
   53.35 -
   53.36  /**
   53.37 - * tb_control - DOM0 operations on trace buffers.
   53.38 - * @tbc: a pointer to a dom0_tbufcontrol_t to be filled out
   53.39 + * tb_control - sysctl operations on trace buffers.
   53.40 + * @tbc: a pointer to a xen_sysctl_tbuf_op_t to be filled out
   53.41   */
   53.42 -int tb_control(dom0_tbufcontrol_t *tbc)
   53.43 +int tb_control(xen_sysctl_tbuf_op_t *tbc)
   53.44  {
   53.45      static DEFINE_SPINLOCK(lock);
   53.46      int rc = 0;
   53.47  
   53.48      spin_lock(&lock);
   53.49  
   53.50 -    switch ( tbc->op )
   53.51 +    switch ( tbc->cmd )
   53.52      {
   53.53 -    case DOM0_TBUF_GET_INFO:
   53.54 -        tbc->cpu_mask   = tb_cpu_mask;
   53.55 +    case XEN_SYSCTL_TBUFOP_get_info:
   53.56          tbc->evt_mask   = tb_event_mask;
   53.57          tbc->buffer_mfn = opt_tbuf_size ? virt_to_mfn(per_cpu(t_bufs, 0)) : 0;
   53.58          tbc->size       = opt_tbuf_size * PAGE_SIZE;
   53.59          break;
   53.60 -    case DOM0_TBUF_SET_CPU_MASK:
   53.61 -        tb_cpu_mask = tbc->cpu_mask;
   53.62 +    case XEN_SYSCTL_TBUFOP_set_cpu_mask:
   53.63 +        xenctl_cpumap_to_cpumask(&tb_cpu_mask, &tbc->cpu_mask);
   53.64          break;
   53.65 -    case DOM0_TBUF_SET_EVT_MASK:
   53.66 +    case XEN_SYSCTL_TBUFOP_set_evt_mask:
   53.67          tb_event_mask = tbc->evt_mask;
   53.68          break;
   53.69 -    case DOM0_TBUF_SET_SIZE:
   53.70 +    case XEN_SYSCTL_TBUFOP_set_size:
   53.71          rc = !tb_init_done ? tb_set_size(tbc->size) : -EINVAL;
   53.72          break;
   53.73 -    case DOM0_TBUF_ENABLE:
   53.74 +    case XEN_SYSCTL_TBUFOP_enable:
   53.75          /* Enable trace buffers. Check buffers are already allocated. */
   53.76          if ( opt_tbuf_size == 0 ) 
   53.77              rc = -EINVAL;
   53.78          else
   53.79              tb_init_done = 1;
   53.80          break;
   53.81 -    case DOM0_TBUF_DISABLE:
   53.82 +    case XEN_SYSCTL_TBUFOP_disable:
   53.83          /*
   53.84           * Disable trace buffers. Just stops new records from being written,
   53.85           * does not deallocate any memory.
   53.86 @@ -254,7 +249,7 @@ void trace(u32 event, unsigned long d1, 
   53.87                  & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
   53.88          return;
   53.89  
   53.90 -    if ( (tb_cpu_mask & (1UL << smp_processor_id())) == 0 )
   53.91 +    if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
   53.92          return;
   53.93  
   53.94      /* Read tb_init_done /before/ t_bufs. */
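
Note the representation change for cpu_mask: internally it is now a cpumask_t, while the sysctl payload carries a struct xenctl_cpumap (a guest handle plus a bit count, defined in domctl.h below); the conversion helpers (cpumask_to_xenctl_cpumap and its inverse) live in xen/cpumask.h, which this changeset also touches. A small sketch of how a tool might wrap its local bitmap in such a cpumap before issuing XEN_SYSCTL_TBUFOP_set_cpu_mask:

    /* Tool-side sketch: publish a local byte-array bitmap through a xenctl_cpumap. */
    static void fill_cpumap_example(struct xenctl_cpumap *map,
                                    uint8_t *bitmap_bytes, uint32_t nr_cpus)
    {
        set_xen_guest_handle(map->bitmap, bitmap_bytes);  /* 64-bit guest-handle slot */
        map->nr_cpus = nr_cpus;                           /* number of valid bits */
    }
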
    54.1 --- a/xen/include/acm/acm_hooks.h	Fri Aug 25 10:39:24 2006 +0100
    54.2 +++ b/xen/include/acm/acm_hooks.h	Fri Aug 25 18:39:10 2006 +0100
    54.3 @@ -28,7 +28,7 @@
    54.4  #include <xen/multiboot.h>
    54.5  #include <public/acm.h>
    54.6  #include <acm/acm_core.h>
    54.7 -#include <public/dom0_ops.h>
    54.8 +#include <public/domctl.h>
    54.9  #include <public/event_channel.h>
   54.10  #include <asm/current.h>
   54.11  
   54.12 @@ -129,11 +129,11 @@ extern struct acm_operations *acm_second
   54.13  
   54.14  #ifndef ACM_SECURITY
   54.15  
   54.16 -static inline int acm_pre_dom0_op(struct dom0_op *op, void **ssid) 
   54.17 +static inline int acm_pre_domctl(struct xen_domctl *op, void **ssid) 
   54.18  { return 0; }
   54.19 -static inline void acm_post_dom0_op(struct dom0_op *op, void *ssid) 
   54.20 +static inline void acm_post_domctl(struct xen_domctl *op, void *ssid) 
   54.21  { return; }
   54.22 -static inline void acm_fail_dom0_op(struct dom0_op *op, void *ssid) 
   54.23 +static inline void acm_fail_domctl(struct xen_domctl *op, void *ssid) 
   54.24  { return; }
   54.25  static inline int acm_pre_eventchannel_unbound(domid_t id1, domid_t id2)
   54.26  { return 0; }
   54.27 @@ -225,23 +225,23 @@ static inline int acm_pre_eventchannel_i
   54.28          return ACM_ACCESS_PERMITTED;
   54.29  }
   54.30  
   54.31 -static inline int acm_pre_dom0_op(struct dom0_op *op, void **ssid) 
   54.32 +static inline int acm_pre_domctl(struct xen_domctl *op, void **ssid) 
   54.33  {
   54.34      int ret = -EACCES;
   54.35      struct domain *d;
   54.36  
   54.37      switch(op->cmd) {
   54.38 -    case DOM0_CREATEDOMAIN:
   54.39 +    case XEN_DOMCTL_createdomain:
   54.40          ret = acm_pre_domain_create(
   54.41              current->domain->ssid, op->u.createdomain.ssidref);
   54.42          break;
   54.43 -    case DOM0_DESTROYDOMAIN:
   54.44 +    case XEN_DOMCTL_destroydomain:
   54.45          if (*ssid != NULL) {
   54.46              printkd("%s: Warning. Overlapping destruction.\n", 
   54.47                      __func__);
   54.48              return -EACCES;
   54.49          }
   54.50 -        d = find_domain_by_id(op->u.destroydomain.domain);
   54.51 +        d = find_domain_by_id(op->domain);
   54.52          if (d != NULL) {
   54.53              *ssid = d->ssid; /* save for post destroy when d is gone */
   54.54              if (*ssid == NULL) {
   54.55 @@ -262,23 +262,23 @@ static inline int acm_pre_dom0_op(struct
   54.56      return ret;
   54.57  }
   54.58  
   54.59 -static inline void acm_post_dom0_op(struct dom0_op *op, void **ssid)
   54.60 +static inline void acm_post_domctl(struct xen_domctl *op, void **ssid)
   54.61  {
   54.62      switch(op->cmd) {
   54.63 -    case DOM0_CREATEDOMAIN:
   54.64 +    case XEN_DOMCTL_createdomain:
   54.65          /* initialialize shared sHype security labels for new domain */
   54.66          acm_init_domain_ssid(
   54.67 -            op->u.createdomain.domain, op->u.createdomain.ssidref);
   54.68 +            op->domain, op->u.createdomain.ssidref);
   54.69          acm_post_domain_create(
   54.70 -            op->u.createdomain.domain, op->u.createdomain.ssidref);
   54.71 +            op->domain, op->u.createdomain.ssidref);
   54.72          break;
   54.73 -    case DOM0_DESTROYDOMAIN:
   54.74 +    case XEN_DOMCTL_destroydomain:
   54.75          if (*ssid == NULL) {
   54.76              printkd("%s: ERROR. SSID unset.\n",
   54.77                      __func__);
   54.78              break;
   54.79          }
   54.80 -        acm_post_domain_destroy(*ssid, op->u.destroydomain.domain);
   54.81 +        acm_post_domain_destroy(*ssid, op->domain);
   54.82          /* free security ssid for the destroyed domain (also if null policy */
   54.83          acm_free_domain_ssid((struct acm_ssid_domain *)(*ssid));
   54.84          *ssid = NULL;
   54.85 @@ -286,14 +286,14 @@ static inline void acm_post_dom0_op(stru
   54.86      }
   54.87  }
   54.88  
   54.89 -static inline void acm_fail_dom0_op(struct dom0_op *op, void **ssid)
   54.90 +static inline void acm_fail_domctl(struct xen_domctl *op, void **ssid)
   54.91  {
   54.92      switch(op->cmd) {
   54.93 -    case DOM0_CREATEDOMAIN:
   54.94 +    case XEN_DOMCTL_createdomain:
   54.95          acm_fail_domain_create(
   54.96              current->domain->ssid, op->u.createdomain.ssidref);
   54.97          break;
   54.98 -    case DOM0_DESTROYDOMAIN:
   54.99 +    case XEN_DOMCTL_destroydomain:
  54.100          /*  we don't handle domain destroy failure but at least free the ssid */
  54.101          if (*ssid == NULL) {
  54.102              printkd("%s: ERROR. SSID unset.\n",
    55.1 --- a/xen/include/asm-ia64/domain.h	Fri Aug 25 10:39:24 2006 +0100
    55.2 +++ b/xen/include/asm-ia64/domain.h	Fri Aug 25 18:39:10 2006 +0100
    55.3 @@ -53,7 +53,7 @@ extern unsigned long domain_set_shared_i
    55.4  extern void domain_cache_flush (struct domain *d, int sync_only);
    55.5  
    55.6  /* Control the shadow mode.  */
    55.7 -extern int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc);
    55.8 +extern int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc);
    55.9  
   55.10  /* Cleanly crash the current domain with a message.  */
   55.11  extern void panic_domain(struct pt_regs *, const char *, ...)
    56.1 --- a/xen/include/asm-x86/shadow2.h	Fri Aug 25 10:39:24 2006 +0100
    56.2 +++ b/xen/include/asm-x86/shadow2.h	Fri Aug 25 18:39:10 2006 +0100
    56.3 @@ -23,7 +23,7 @@
    56.4  #ifndef _XEN_SHADOW2_H
    56.5  #define _XEN_SHADOW2_H
    56.6  
    56.7 -#include <public/dom0_ops.h> 
    56.8 +#include <public/domctl.h> 
    56.9  #include <xen/sched.h>
   56.10  #include <xen/perfc.h>
   56.11  #include <asm/flushtlb.h>
   56.12 @@ -34,14 +34,14 @@
   56.13  /* We're in one of the shadow modes */
   56.14  #define SHM2_enable    (1U << SHM2_shift)
   56.15  /* Refcounts based on shadow tables instead of guest tables */
   56.16 -#define SHM2_refcounts (DOM0_SHADOW_ENABLE_REFCOUNT << SHM2_shift)
   56.17 +#define SHM2_refcounts (XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT << SHM2_shift)
   56.18  /* Enable log dirty mode */
   56.19 -#define SHM2_log_dirty (DOM0_SHADOW_ENABLE_LOG_DIRTY << SHM2_shift)
   56.20 +#define SHM2_log_dirty (XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << SHM2_shift)
   56.21  /* Xen does p2m translation, not guest */
   56.22 -#define SHM2_translate (DOM0_SHADOW_ENABLE_TRANSLATE << SHM2_shift)
   56.23 +#define SHM2_translate (XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE << SHM2_shift)
   56.24  /* Xen does not steal address space from the domain for its own booking;
   56.25   * requires VT or similar mechanisms */
   56.26 -#define SHM2_external  (DOM0_SHADOW_ENABLE_EXTERNAL << SHM2_shift)
   56.27 +#define SHM2_external  (XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL << SHM2_shift)
   56.28  
   56.29  #define shadow2_mode_enabled(_d)   ((_d)->arch.shadow2.mode)
   56.30  #define shadow2_mode_refcounts(_d) ((_d)->arch.shadow2.mode & SHM2_refcounts)
   56.31 @@ -297,9 +297,9 @@ int shadow2_test_enable(struct domain *d
   56.32  
   56.33  /* Handler for shadow control ops: enabling and disabling shadow modes, 
   56.34   * and log-dirty bitmap ops all happen through here. */
   56.35 -int shadow2_control_op(struct domain *d, 
   56.36 -                       dom0_shadow_control_t *sc,
   56.37 -                       XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op);
   56.38 +int shadow2_domctl(struct domain *d, 
   56.39 +                   xen_domctl_shadow_op_t *sc,
   56.40 +                   XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
   56.41  
   56.42  /* Call when destroying a domain */
   56.43  void shadow2_teardown(struct domain *d);
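
The SHM2_* mode bits above are now derived directly from the public XEN_DOMCTL_SHADOW_ENABLE_* flags, so a mode word supplied through the domctl maps onto the internal representation with a single shift. A hedged illustration (SHM2_shift is defined earlier in shadow2.h, outside this hunk):

    /* Illustration only: translate public enable flags to internal SHM2_* bits. */
    static inline uint32_t shadow2_mode_from_flags(uint32_t domctl_flags)
    {
        /* e.g. ENABLE_REFCOUNT | ENABLE_LOG_DIRTY -> SHM2_refcounts | SHM2_log_dirty */
        return SHM2_enable | (domctl_flags << SHM2_shift);
    }
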
    57.1 --- a/xen/include/public/acm.h	Fri Aug 25 10:39:24 2006 +0100
    57.2 +++ b/xen/include/public/acm.h	Fri Aug 25 18:39:10 2006 +0100
    57.3 @@ -9,7 +9,6 @@
    57.4  #define _XEN_PUBLIC_ACM_H
    57.5  
    57.6  #include "xen.h"
    57.7 -#include "sched_ctl.h"
    57.8  
    57.9  /* if ACM_DEBUG defined, all hooks should
   57.10   * print a short trace message (comment it out
    58.1 --- a/xen/include/public/acm_ops.h	Fri Aug 25 10:39:24 2006 +0100
    58.2 +++ b/xen/include/public/acm_ops.h	Fri Aug 25 18:39:10 2006 +0100
    58.3 @@ -9,7 +9,6 @@
    58.4  #define __XEN_PUBLIC_ACM_OPS_H__
    58.5  
    58.6  #include "xen.h"
    58.7 -#include "sched_ctl.h"
    58.8  #include "acm.h"
    58.9  
   58.10  /*
    59.1 --- a/xen/include/public/arch-ia64.h	Fri Aug 25 10:39:24 2006 +0100
    59.2 +++ b/xen/include/public/arch-ia64.h	Fri Aug 25 18:39:10 2006 +0100
    59.3 @@ -18,12 +18,15 @@
    59.4  
    59.5  #define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
    59.6  #define XEN_GUEST_HANDLE(name)          __guest_handle_ ## name
    59.7 +#define XEN_GUEST_HANDLE_64(name)       __guest_handle_ ## name
    59.8  #define set_xen_guest_handle(hnd, val)  do { (hnd).p = val; } while (0)
    59.9  #ifdef __XEN_TOOLS__
   59.10  #define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
   59.11  #endif
   59.12  
   59.13  #ifndef __ASSEMBLY__
   59.14 +typedef uint64_t uint64_aligned_t;
   59.15 +
   59.16  /* Guest handles for primitive C types. */
   59.17  __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
   59.18  __DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
    60.1 --- a/xen/include/public/arch-powerpc.h	Fri Aug 25 10:39:24 2006 +0100
    60.2 +++ b/xen/include/public/arch-powerpc.h	Fri Aug 25 18:39:10 2006 +0100
    60.3 @@ -29,6 +29,7 @@
    60.4  
    60.5  #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
    60.6  #define XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
    60.7 +#define XEN_GUEST_HANDLE_64(name)     __guest_handle_ ## name
    60.8  #define set_xen_guest_handle(hnd, val) \
    60.9      do { \
   60.10          if (sizeof ((hnd).__pad)) \
   60.11 @@ -41,6 +42,8 @@
   60.12  #endif
   60.13  
   60.14  #ifndef __ASSEMBLY__
   60.15 +typedef uint64_t uint64_aligned_t;
   60.16 +
   60.17  /* Guest handles for primitive C types. */
   60.18  __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
   60.19  __DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
    61.1 --- a/xen/include/public/arch-x86_32.h	Fri Aug 25 10:39:24 2006 +0100
    61.2 +++ b/xen/include/public/arch-x86_32.h	Fri Aug 25 18:39:10 2006 +0100
    61.3 @@ -27,9 +27,15 @@
    61.4  #define TRAP_INSTR "int $0x82"
    61.5  #endif
    61.6  
    61.7 -
    61.8  /* Structural guest handles introduced in 0x00030201. */
    61.9 -#if __XEN_INTERFACE_VERSION__ >= 0x00030201
   61.10 +#if (defined(__XEN__) || defined(__XEN_TOOLS__)) && !defined(__ASSEMBLY__)
   61.11 +typedef uint64_t __attribute__((aligned(8))) uint64_aligned_t;
   61.12 +#define __DEFINE_XEN_GUEST_HANDLE(name, type)                   \
   61.13 +    typedef struct { type *p; }                                 \
   61.14 +        __guest_handle_ ## name;                                \
   61.15 +    typedef struct { union { type *p; uint64_aligned_t q; }; }  \
   61.16 +        __guest_handle_64_ ## name
   61.17 +#elif __XEN_INTERFACE_VERSION__ >= 0x00030201
   61.18  #define __DEFINE_XEN_GUEST_HANDLE(name, type) \
   61.19      typedef struct { type *p; } __guest_handle_ ## name
   61.20  #else
   61.21 @@ -39,9 +45,15 @@
   61.22  
   61.23  #define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
   61.24  #define XEN_GUEST_HANDLE(name)          __guest_handle_ ## name
   61.25 -#define set_xen_guest_handle(hnd, val)  do { (hnd).p = val; } while (0)
   61.26 +#define XEN_GUEST_HANDLE_64(name)       __guest_handle_64_ ## name
   61.27  #ifdef __XEN_TOOLS__
   61.28  #define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
   61.29 +#define set_xen_guest_handle(hnd, val)                      \
   61.30 +    do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0;   \
   61.31 +         (hnd).p = val;                                     \
   61.32 +    } while ( 0 )
   61.33 +#else
   61.34 +#define set_xen_guest_handle(hnd, val)  do { (hnd).p = val; } while (0)
   61.35  #endif
   61.36  
   61.37  #ifndef __ASSEMBLY__
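
On x86_32 there are now two handle flavours: the legacy pointer-sized XEN_GUEST_HANDLE and a XEN_GUEST_HANDLE_64 whose anonymous union pads the pointer out to an 8-byte-aligned slot, giving domctl/sysctl structures one layout for 32-bit and 64-bit toolstacks alike. That is also why the tools variant of set_xen_guest_handle() zeroes the full 8 bytes before storing the pointer. A minimal illustration, assuming a 32-bit build with __XEN_TOOLS__ defined:

    /* The 64-bit slot overlays the pointer, so clearing it first avoids
     * stale upper bits when a 32-bit tool stores a 4-byte pointer. */
    __DEFINE_XEN_GUEST_HANDLE(example_uchar, unsigned char);

    void handle_example(unsigned char *buf)
    {
        XEN_GUEST_HANDLE_64(example_uchar) h;
        set_xen_guest_handle(h, buf);   /* zeroes 8 bytes, then h.p = buf */
        /* sizeof(h) == 8 regardless of the toolstack's pointer size */
    }
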
    62.1 --- a/xen/include/public/arch-x86_64.h	Fri Aug 25 10:39:24 2006 +0100
    62.2 +++ b/xen/include/public/arch-x86_64.h	Fri Aug 25 18:39:10 2006 +0100
    62.3 @@ -39,12 +39,15 @@
    62.4  
    62.5  #define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
    62.6  #define XEN_GUEST_HANDLE(name)          __guest_handle_ ## name
    62.7 +#define XEN_GUEST_HANDLE_64(name)       __guest_handle_ ## name
    62.8  #define set_xen_guest_handle(hnd, val)  do { (hnd).p = val; } while (0)
    62.9  #ifdef __XEN_TOOLS__
   62.10  #define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
   62.11  #endif
   62.12  
   62.13  #ifndef __ASSEMBLY__
   62.14 +typedef uint64_t uint64_aligned_t;
   62.15 +
   62.16  /* Guest handles for primitive C types. */
   62.17  __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
   62.18  __DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
    63.1 --- a/xen/include/public/dom0_ops.h	Fri Aug 25 10:39:24 2006 +0100
    63.2 +++ b/xen/include/public/dom0_ops.h	Fri Aug 25 18:39:10 2006 +0100
    63.3 @@ -4,133 +4,49 @@
    63.4   * Process command requests from domain-0 guest OS.
    63.5   * 
    63.6   * Copyright (c) 2002-2003, B Dragovic
    63.7 - * Copyright (c) 2002-2004, K Fraser
    63.8 + * Copyright (c) 2002-2006, K Fraser
    63.9   */
   63.10  
   63.11 -
   63.12  #ifndef __XEN_PUBLIC_DOM0_OPS_H__
   63.13  #define __XEN_PUBLIC_DOM0_OPS_H__
   63.14  
   63.15  #include "xen.h"
   63.16 -#include "sched_ctl.h"
   63.17 +#include "platform.h"
   63.18  
   63.19 -/*
   63.20 - * Make sure you increment the interface version whenever you modify this file!
   63.21 - * This makes sure that old versions of dom0 tools will stop working in a
   63.22 - * well-defined way (rather than crashing the machine, for instance).
   63.23 - * 
   63.24 - * Separate kernel from tools as the kernel uses a small subset of the dom0
   63.25 - * operations and so it is unnecessary to break backward compatibility so
   63.26 - * often.
   63.27 - */
   63.28 -#define DOM0_TOOLS_INTERFACE_VERSION  0x13000001
   63.29 -#define DOM0_KERNEL_INTERFACE_VERSION 0x03000001
   63.30 -
   63.31 -#ifdef __XEN_TOOLS__
   63.32 -#define DOM0_INTERFACE_VERSION DOM0_TOOLS_INTERFACE_VERSION
   63.33 -#else
   63.34 -#define DOM0_INTERFACE_VERSION DOM0_KERNEL_INTERFACE_VERSION
   63.35 +#if __XEN_INTERFACE_VERSION__ >= 0x00030204
   63.36 +#error "dom0_ops.h is a compatibility interface only"
   63.37  #endif
   63.38  
   63.39 -/************************************************************************/
   63.40 +#define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION
   63.41  
   63.42 -#define DOM0_GETMEMLIST        2
   63.43 -struct dom0_getmemlist {
   63.44 -    /* IN variables. */
   63.45 -    domid_t       domain;
   63.46 -    uint64_t max_pfns;
   63.47 -    XEN_GUEST_HANDLE(xen_pfn_t) buffer;
   63.48 -    /* OUT variables. */
   63.49 -    uint64_t num_pfns;
   63.50 -};
   63.51 -typedef struct dom0_getmemlist dom0_getmemlist_t;
   63.52 -DEFINE_XEN_GUEST_HANDLE(dom0_getmemlist_t);
   63.53 -
   63.54 -#define DOM0_SCHEDCTL          6
   63.55 - /* struct sched_ctl_cmd is from sched-ctl.h   */
   63.56 -typedef struct sched_ctl_cmd dom0_schedctl_t;
   63.57 -DEFINE_XEN_GUEST_HANDLE(dom0_schedctl_t);
   63.58 +#define DOM0_SETTIME          XENPF_settime
   63.59 +#define dom0_settime          xenpf_settime
   63.60 +#define dom0_settime_t        xenpf_settime_t
   63.61  
   63.62 -#define DOM0_ADJUSTDOM         7
   63.63 -/* struct sched_adjdom_cmd is from sched-ctl.h */
   63.64 -typedef struct sched_adjdom_cmd dom0_adjustdom_t;
   63.65 -DEFINE_XEN_GUEST_HANDLE(dom0_adjustdom_t);
   63.66 +#define DOM0_ADD_MEMTYPE      XENPF_add_memtype
   63.67 +#define dom0_add_memtype      xenpf_add_memtype
   63.68 +#define dom0_add_memtype_t    xenpf_add_memtype_t
   63.69  
   63.70 -#define DOM0_CREATEDOMAIN      8
   63.71 -struct dom0_createdomain {
   63.72 -    /* IN parameters */
   63.73 -    uint32_t ssidref;
   63.74 -    xen_domain_handle_t handle;
   63.75 -    /* IN/OUT parameters. */
   63.76 -    /* Identifier for new domain (auto-allocate if zero is specified). */
   63.77 -    domid_t domain;
   63.78 -};
   63.79 -typedef struct dom0_createdomain dom0_createdomain_t;
   63.80 -DEFINE_XEN_GUEST_HANDLE(dom0_createdomain_t);
   63.81 -
   63.82 -#define DOM0_DESTROYDOMAIN     9
   63.83 -struct dom0_destroydomain {
   63.84 -    /* IN variables. */
   63.85 -    domid_t domain;
   63.86 -};
   63.87 -typedef struct dom0_destroydomain dom0_destroydomain_t;
   63.88 -DEFINE_XEN_GUEST_HANDLE(dom0_destroydomain_t);
   63.89 +#define DOM0_DEL_MEMTYPE      XENPF_del_memtype
   63.90 +#define dom0_del_memtype      xenpf_del_memtype
   63.91 +#define dom0_del_memtype_t    xenpf_del_memtype_t
   63.92  
   63.93 -#define DOM0_PAUSEDOMAIN      10
   63.94 -struct dom0_pausedomain {
   63.95 -    /* IN parameters. */
   63.96 -    domid_t domain;
   63.97 -};
   63.98 -typedef struct dom0_pausedomain dom0_pausedomain_t;
   63.99 -DEFINE_XEN_GUEST_HANDLE(dom0_pausedomain_t);
  63.100 +#define DOM0_READ_MEMTYPE     XENPF_read_memtype
  63.101 +#define dom0_read_memtype     xenpf_read_memtype
  63.102 +#define dom0_read_memtype_t   xenpf_read_memtype_t
  63.103  
  63.104 -#define DOM0_UNPAUSEDOMAIN    11
  63.105 -struct dom0_unpausedomain {
  63.106 -    /* IN parameters. */
  63.107 -    domid_t domain;
  63.108 -};
  63.109 -typedef struct dom0_unpausedomain dom0_unpausedomain_t;
  63.110 -DEFINE_XEN_GUEST_HANDLE(dom0_unpausedomain_t);
  63.111 +#define DOM0_MICROCODE        XENPF_microcode_update
  63.112 +#define dom0_microcode        xenpf_microcode_update
  63.113 +#define dom0_microcode_t      xenpf_microcode_update_t
  63.114  
  63.115 -#define DOM0_GETDOMAININFO    12
  63.116 -struct dom0_getdomaininfo {
  63.117 -    /* IN variables. */
  63.118 -    domid_t  domain;                  /* NB. IN/OUT variable. */
  63.119 -    /* OUT variables. */
  63.120 -#define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
  63.121 -#define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.             */
  63.122 -#define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
  63.123 -#define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
  63.124 -#define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
  63.125 -#define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.      */
  63.126 -#define DOMFLAGS_CPUSHIFT       8
  63.127 -#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
  63.128 -#define DOMFLAGS_SHUTDOWNSHIFT 16
  63.129 -    uint32_t flags;
  63.130 -    uint64_t tot_pages;
  63.131 -    uint64_t max_pages;
  63.132 -    xen_pfn_t shared_info_frame;  /* MFN of shared_info struct */
  63.133 -    uint64_t cpu_time;
  63.134 -    uint32_t nr_online_vcpus;     /* Number of VCPUs currently online. */
  63.135 -    uint32_t max_vcpu_id;         /* Maximum VCPUID in use by this domain. */
  63.136 -    uint32_t ssidref;
  63.137 -    xen_domain_handle_t handle;
  63.138 -};
  63.139 -typedef struct dom0_getdomaininfo dom0_getdomaininfo_t;
  63.140 -DEFINE_XEN_GUEST_HANDLE(dom0_getdomaininfo_t);
  63.141 +#define DOM0_PLATFORM_QUIRK   XENPF_platform_quirk
  63.142 +#define dom0_platform_quirk   xenpf_platform_quirk
  63.143 +#define dom0_platform_quirk_t xenpf_platform_quirk_t
  63.144  
  63.145 -#define DOM0_SETVCPUCONTEXT   13
  63.146 -struct dom0_setvcpucontext {
  63.147 -    /* IN variables. */
  63.148 -    domid_t               domain;
  63.149 -    uint32_t              vcpu;
  63.150 -    /* IN/OUT parameters */
  63.151 -    XEN_GUEST_HANDLE(vcpu_guest_context_t) ctxt;
  63.152 -};
  63.153 -typedef struct dom0_setvcpucontext dom0_setvcpucontext_t;
  63.154 -DEFINE_XEN_GUEST_HANDLE(dom0_setvcpucontext_t);
  63.155 +typedef uint64_t cpumap_t;
  63.156  
  63.157 -#define DOM0_MSR              15
  63.158 +/* Unsupported legacy operation -- defined for API compatibility. */
  63.159 +#define DOM0_MSR                 15
  63.160  struct dom0_msr {
  63.161      /* IN variables. */
  63.162      uint32_t write;
  63.163 @@ -145,361 +61,8 @@ struct dom0_msr {
  63.164  typedef struct dom0_msr dom0_msr_t;
  63.165  DEFINE_XEN_GUEST_HANDLE(dom0_msr_t);
  63.166  
  63.167 -/*
  63.168 - * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
  63.169 - * 1 January, 1970 if the current system time was <system_time>.
  63.170 - */
  63.171 -#define DOM0_SETTIME          17
  63.172 -struct dom0_settime {
  63.173 -    /* IN variables. */
  63.174 -    uint32_t secs;
  63.175 -    uint32_t nsecs;
  63.176 -    uint64_t system_time;
  63.177 -};
  63.178 -typedef struct dom0_settime dom0_settime_t;
  63.179 -DEFINE_XEN_GUEST_HANDLE(dom0_settime_t);
  63.180 -
  63.181 -#define DOM0_GETPAGEFRAMEINFO 18
  63.182 -#define LTAB_SHIFT 28
  63.183 -#define NOTAB 0         /* normal page */
  63.184 -#define L1TAB (1<<LTAB_SHIFT)
  63.185 -#define L2TAB (2<<LTAB_SHIFT)
  63.186 -#define L3TAB (3<<LTAB_SHIFT)
  63.187 -#define L4TAB (4<<LTAB_SHIFT)
  63.188 -#define LPINTAB  (1<<31)
  63.189 -#define XTAB  (0xf<<LTAB_SHIFT) /* invalid page */
  63.190 -#define LTAB_MASK XTAB
  63.191 -#define LTABTYPE_MASK (0x7<<LTAB_SHIFT)
  63.192 -
  63.193 -struct dom0_getpageframeinfo {
  63.194 -    /* IN variables. */
  63.195 -    xen_pfn_t gmfn;        /* GMFN to query.                            */
  63.196 -    domid_t domain;        /* To which domain does the frame belong?    */
  63.197 -    /* OUT variables. */
  63.198 -    /* Is the page PINNED to a type? */
  63.199 -    uint32_t type;         /* see above type defs */
  63.200 -};
  63.201 -typedef struct dom0_getpageframeinfo dom0_getpageframeinfo_t;
  63.202 -DEFINE_XEN_GUEST_HANDLE(dom0_getpageframeinfo_t);
  63.203 -
  63.204 -/*
  63.205 - * Read console content from Xen buffer ring.
  63.206 - */
  63.207 -#define DOM0_READCONSOLE      19
  63.208 -struct dom0_readconsole {
  63.209 -    /* IN variables. */
  63.210 -    uint32_t clear;                /* Non-zero -> clear after reading. */
  63.211 -    XEN_GUEST_HANDLE(char) buffer; /* Buffer start */
  63.212 -    /* IN/OUT variables. */
  63.213 -    uint32_t count;            /* In: Buffer size;  Out: Used buffer size  */
  63.214 -};
  63.215 -typedef struct dom0_readconsole dom0_readconsole_t;
  63.216 -DEFINE_XEN_GUEST_HANDLE(dom0_readconsole_t);
  63.217 -
  63.218 -/*
  63.219 - * Set which physical cpus a vcpu can execute on.
  63.220 - */
  63.221 -#define DOM0_SETVCPUAFFINITY  20
  63.222 -struct dom0_setvcpuaffinity {
  63.223 -    /* IN variables. */
  63.224 -    domid_t   domain;
  63.225 -    uint32_t  vcpu;
  63.226 -    cpumap_t  cpumap;
  63.227 -};
  63.228 -typedef struct dom0_setvcpuaffinity dom0_setvcpuaffinity_t;
  63.229 -DEFINE_XEN_GUEST_HANDLE(dom0_setvcpuaffinity_t);
  63.230 -
  63.231 -/* Get trace buffers machine base address */
  63.232 -#define DOM0_TBUFCONTROL       21
  63.233 -struct dom0_tbufcontrol {
  63.234 -    /* IN variables */
  63.235 -#define DOM0_TBUF_GET_INFO     0
  63.236 -#define DOM0_TBUF_SET_CPU_MASK 1
  63.237 -#define DOM0_TBUF_SET_EVT_MASK 2
  63.238 -#define DOM0_TBUF_SET_SIZE     3
  63.239 -#define DOM0_TBUF_ENABLE       4
  63.240 -#define DOM0_TBUF_DISABLE      5
  63.241 -    uint32_t      op;
  63.242 -    /* IN/OUT variables */
  63.243 -    cpumap_t      cpu_mask;
  63.244 -    uint32_t      evt_mask;
  63.245 -    /* OUT variables */
  63.246 -    xen_pfn_t buffer_mfn;
  63.247 -    uint32_t size;
  63.248 -};
  63.249 -typedef struct dom0_tbufcontrol dom0_tbufcontrol_t;
  63.250 -DEFINE_XEN_GUEST_HANDLE(dom0_tbufcontrol_t);
  63.251 -
  63.252 -/*
  63.253 - * Get physical information about the host machine
  63.254 - */
  63.255 -#define DOM0_PHYSINFO         22
  63.256 -struct dom0_physinfo {
  63.257 -    uint32_t threads_per_core;
  63.258 -    uint32_t cores_per_socket;
  63.259 -    uint32_t sockets_per_node;
  63.260 -    uint32_t nr_nodes;
  63.261 -    uint32_t cpu_khz;
  63.262 -    uint64_t total_pages;
  63.263 -    uint64_t free_pages;
  63.264 -    uint64_t scrub_pages;
  63.265 -    uint32_t hw_cap[8];
  63.266 -};
  63.267 -typedef struct dom0_physinfo dom0_physinfo_t;
  63.268 -DEFINE_XEN_GUEST_HANDLE(dom0_physinfo_t);
  63.269 -
  63.270 -/*
  63.271 - * Get the ID of the current scheduler.
  63.272 - */
  63.273 -#define DOM0_SCHED_ID        24
  63.274 -struct dom0_sched_id {
  63.275 -    /* OUT variable */
  63.276 -    uint32_t sched_id;
  63.277 -};
  63.278 -typedef struct dom0_physinfo dom0_sched_id_t;
  63.279 -DEFINE_XEN_GUEST_HANDLE(dom0_sched_id_t);
  63.280 -
  63.281 -/*
  63.282 - * Control shadow pagetables operation
  63.283 - */
  63.284 -#define DOM0_SHADOW_CONTROL  25
  63.285 -
  63.286 -/* Disable shadow mode. */
  63.287 -#define DOM0_SHADOW_CONTROL_OP_OFF         0
  63.288 -
  63.289 -/* Enable shadow mode (mode contains ORed DOM0_SHADOW_ENABLE_* flags). */
  63.290 -#define DOM0_SHADOW_CONTROL_OP_ENABLE      32
  63.291 -
  63.292 -/* Log-dirty bitmap operations. */
  63.293 - /* Return the bitmap and clean internal copy for next round. */
  63.294 -#define DOM0_SHADOW_CONTROL_OP_CLEAN       11
  63.295 - /* Return the bitmap but do not modify internal copy. */
  63.296 -#define DOM0_SHADOW_CONTROL_OP_PEEK        12
  63.297 -
  63.298 -/* Memory allocation accessors. */
  63.299 -#define DOM0_SHADOW_CONTROL_OP_GET_ALLOCATION   30
  63.300 -#define DOM0_SHADOW_CONTROL_OP_SET_ALLOCATION   31
  63.301 -
  63.302 -/* Legacy enable operations. */
  63.303 - /* Equiv. to ENABLE with no mode flags. */
  63.304 -#define DOM0_SHADOW_CONTROL_OP_ENABLE_TEST       1
  63.305 - /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */
  63.306 -#define DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY   2
  63.307 - /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */
  63.308 -#define DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE  3
  63.309 -
  63.310 -/* Mode flags for DOM0_SHADOW_CONTROL_OP_ENABLE. */
  63.311 - /*
  63.312 -  * Shadow pagetables are refcounted: guest does not use explicit mmu
  63.313 -  * operations nor write-protect its pagetables.
  63.314 -  */
  63.315 -#define DOM0_SHADOW_ENABLE_REFCOUNT  (1 << 1)
  63.316 - /*
  63.317 -  * Log pages in a bitmap as they are dirtied.
  63.318 -  * Used for live relocation to determine which pages must be re-sent.
  63.319 -  */
  63.320 -#define DOM0_SHADOW_ENABLE_LOG_DIRTY (1 << 2)
  63.321 - /*
  63.322 -  * Automatically translate GPFNs into MFNs.
  63.323 -  */
  63.324 -#define DOM0_SHADOW_ENABLE_TRANSLATE (1 << 3)
  63.325 - /*
  63.326 -  * Xen does not steal virtual address space from the guest.
  63.327 -  * Requires HVM support.
  63.328 -  */
  63.329 -#define DOM0_SHADOW_ENABLE_EXTERNAL  (1 << 4)
  63.330 -
  63.331 -struct dom0_shadow_control_stats {
  63.332 -    uint32_t fault_count;
  63.333 -    uint32_t dirty_count;
  63.334 -};
  63.335 -typedef struct dom0_shadow_control_stats dom0_shadow_control_stats_t;
  63.336 -DEFINE_XEN_GUEST_HANDLE(dom0_shadow_control_stats_t);
  63.337 -
  63.338 -struct dom0_shadow_control {
  63.339 -    /* IN variables. */
  63.340 -    domid_t        domain;
  63.341 -    uint32_t       op;       /* DOM0_SHADOW_CONTROL_OP_* */
  63.342 -
  63.343 -    /* OP_ENABLE */
  63.344 -    uint32_t       mode;     /* DOM0_SHADOW_ENABLE_* */
  63.345 -
  63.346 -    /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
  63.347 -    uint32_t       mb;       /* Shadow memory allocation in MB */
  63.348 -
  63.349 -    /* OP_PEEK / OP_CLEAN */
  63.350 -    XEN_GUEST_HANDLE(ulong) dirty_bitmap;
  63.351 -    uint64_t       pages;    /* Size of buffer. Updated with actual size. */
  63.352 -    struct dom0_shadow_control_stats stats;
  63.353 -};
  63.354 -typedef struct dom0_shadow_control dom0_shadow_control_t;
  63.355 -DEFINE_XEN_GUEST_HANDLE(dom0_shadow_control_t);
  63.356 -
  63.357 -#define DOM0_SETDOMAINMAXMEM   28
  63.358 -struct dom0_setdomainmaxmem {
  63.359 -    /* IN variables. */
  63.360 -    domid_t  domain;
  63.361 -    uint64_t max_memkb;
  63.362 -};
  63.363 -typedef struct dom0_setdomainmaxmem dom0_setdomainmaxmem_t;
  63.364 -DEFINE_XEN_GUEST_HANDLE(dom0_setdomainmaxmem_t);
  63.365 -
  63.366 -#define DOM0_GETPAGEFRAMEINFO2 29   /* batched interface */
  63.367 -struct dom0_getpageframeinfo2 {
  63.368 -    /* IN variables. */
  63.369 -    domid_t  domain;
  63.370 -    uint64_t num;
  63.371 -    /* IN/OUT variables. */
  63.372 -    XEN_GUEST_HANDLE(ulong) array;
  63.373 -};
  63.374 -typedef struct dom0_getpageframeinfo2 dom0_getpageframeinfo2_t;
  63.375 -DEFINE_XEN_GUEST_HANDLE(dom0_getpageframeinfo2_t);
  63.376 -
  63.377 -/*
  63.378 - * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
  63.379 - * On x86, @type is an architecture-defined MTRR memory type.
  63.380 - * On success, returns the MTRR that was used (@reg) and a handle that can
  63.381 - * be passed to DOM0_DEL_MEMTYPE to accurately tear down the new setting.
  63.382 - * (x86-specific).
  63.383 - */
  63.384 -#define DOM0_ADD_MEMTYPE         31
  63.385 -struct dom0_add_memtype {
  63.386 -    /* IN variables. */
  63.387 -    xen_pfn_t mfn;
  63.388 -    uint64_t nr_mfns;
  63.389 -    uint32_t type;
  63.390 -    /* OUT variables. */
  63.391 -    uint32_t handle;
  63.392 -    uint32_t reg;
  63.393 -};
  63.394 -typedef struct dom0_add_memtype dom0_add_memtype_t;
  63.395 -DEFINE_XEN_GUEST_HANDLE(dom0_add_memtype_t);
  63.396 -
  63.397 -/*
  63.398 - * Tear down an existing memory-range type. If @handle is remembered then it
  63.399 - * should be passed in to accurately tear down the correct setting (in case
  63.400 - * of overlapping memory regions with differing types). If it is not known
  63.401 - * then @handle should be set to zero. In all cases @reg must be set.
  63.402 - * (x86-specific).
  63.403 - */
  63.404 -#define DOM0_DEL_MEMTYPE         32
  63.405 -struct dom0_del_memtype {
  63.406 -    /* IN variables. */
  63.407 -    uint32_t handle;
  63.408 -    uint32_t reg;
  63.409 -};
  63.410 -typedef struct dom0_del_memtype dom0_del_memtype_t;
  63.411 -DEFINE_XEN_GUEST_HANDLE(dom0_del_memtype_t);
  63.412 -
  63.413 -/* Read current type of an MTRR (x86-specific). */
  63.414 -#define DOM0_READ_MEMTYPE        33
  63.415 -struct dom0_read_memtype {
  63.416 -    /* IN variables. */
  63.417 -    uint32_t reg;
  63.418 -    /* OUT variables. */
  63.419 -    xen_pfn_t mfn;
  63.420 -    uint64_t nr_mfns;
  63.421 -    uint32_t type;
  63.422 -};
  63.423 -typedef struct dom0_read_memtype dom0_read_memtype_t;
  63.424 -DEFINE_XEN_GUEST_HANDLE(dom0_read_memtype_t);
  63.425 -
  63.426 -/* Interface for controlling Xen software performance counters. */
  63.427 -#define DOM0_PERFCCONTROL        34
  63.428 -/* Sub-operations: */
  63.429 -#define DOM0_PERFCCONTROL_OP_RESET 1   /* Reset all counters to zero. */
  63.430 -#define DOM0_PERFCCONTROL_OP_QUERY 2   /* Get perfctr information. */
  63.431 -struct dom0_perfc_desc {
  63.432 -    char         name[80];             /* name of perf counter */
  63.433 -    uint32_t     nr_vals;              /* number of values for this counter */
  63.434 -};
  63.435 -typedef struct dom0_perfc_desc dom0_perfc_desc_t;
  63.436 -DEFINE_XEN_GUEST_HANDLE(dom0_perfc_desc_t);
  63.437 -typedef uint32_t dom0_perfc_val_t;
  63.438 -DEFINE_XEN_GUEST_HANDLE(dom0_perfc_val_t);
  63.439 -
  63.440 -struct dom0_perfccontrol {
  63.441 -    /* IN variables. */
  63.442 -    uint32_t       op;                /*  DOM0_PERFCCONTROL_OP_??? */
  63.443 -    /* OUT variables. */
  63.444 -    uint32_t       nr_counters;       /*  number of counters description  */
  63.445 -    uint32_t       nr_vals;			  /*  number of values  */
  63.446 -    XEN_GUEST_HANDLE(dom0_perfc_desc_t) desc; /*  counter information (or NULL) */
  63.447 -    XEN_GUEST_HANDLE(dom0_perfc_val_t) val;   /*  counter values (or NULL)  */
  63.448 -};
  63.449 -typedef struct dom0_perfccontrol dom0_perfccontrol_t;
  63.450 -DEFINE_XEN_GUEST_HANDLE(dom0_perfccontrol_t);
  63.451 -
  63.452 -#define DOM0_MICROCODE           35
  63.453 -struct dom0_microcode {
  63.454 -    /* IN variables. */
  63.455 -    XEN_GUEST_HANDLE(void) data;          /* Pointer to microcode data */
  63.456 -    uint32_t length;                  /* Length of microcode data. */
  63.457 -};
  63.458 -typedef struct dom0_microcode dom0_microcode_t;
  63.459 -DEFINE_XEN_GUEST_HANDLE(dom0_microcode_t);
  63.460 -
  63.461 -#define DOM0_IOPORT_PERMISSION   36
  63.462 -struct dom0_ioport_permission {
  63.463 -    domid_t  domain;                  /* domain to be affected */
  63.464 -    uint32_t first_port;              /* first port int range */
  63.465 -    uint32_t nr_ports;                /* size of port range */
  63.466 -    uint8_t  allow_access;            /* allow or deny access to range? */
  63.467 -};
  63.468 -typedef struct dom0_ioport_permission dom0_ioport_permission_t;
  63.469 -DEFINE_XEN_GUEST_HANDLE(dom0_ioport_permission_t);
  63.470 -
  63.471 -#define DOM0_GETVCPUCONTEXT      37
  63.472 -struct dom0_getvcpucontext {
  63.473 -    /* IN variables. */
  63.474 -    domid_t  domain;                  /* domain to be affected */
  63.475 -    uint32_t vcpu;                    /* vcpu # */
  63.476 -    /* OUT variables. */
  63.477 -    XEN_GUEST_HANDLE(vcpu_guest_context_t) ctxt;
  63.478 -};
  63.479 -typedef struct dom0_getvcpucontext dom0_getvcpucontext_t;
  63.480 -DEFINE_XEN_GUEST_HANDLE(dom0_getvcpucontext_t);
  63.481 -
  63.482 -#define DOM0_GETVCPUINFO         43
  63.483 -struct dom0_getvcpuinfo {
  63.484 -    /* IN variables. */
  63.485 -    domid_t  domain;                  /* domain to be affected */
  63.486 -    uint32_t vcpu;                    /* vcpu # */
  63.487 -    /* OUT variables. */
  63.488 -    uint8_t  online;                  /* currently online (not hotplugged)? */
  63.489 -    uint8_t  blocked;                 /* blocked waiting for an event? */
  63.490 -    uint8_t  running;                 /* currently scheduled on its CPU? */
  63.491 -    uint64_t cpu_time;                /* total cpu time consumed (ns) */
  63.492 -    uint32_t cpu;                     /* current mapping   */
  63.493 -    cpumap_t cpumap;                  /* allowable mapping */
  63.494 -};
  63.495 -typedef struct dom0_getvcpuinfo dom0_getvcpuinfo_t;
  63.496 -DEFINE_XEN_GUEST_HANDLE(dom0_getvcpuinfo_t);
  63.497 -
  63.498 -#define DOM0_GETDOMAININFOLIST   38
  63.499 -struct dom0_getdomaininfolist {
  63.500 -    /* IN variables. */
  63.501 -    domid_t               first_domain;
  63.502 -    uint32_t              max_domains;
  63.503 -    XEN_GUEST_HANDLE(dom0_getdomaininfo_t) buffer;
  63.504 -    /* OUT variables. */
  63.505 -    uint32_t              num_domains;
  63.506 -};
  63.507 -typedef struct dom0_getdomaininfolist dom0_getdomaininfolist_t;
  63.508 -DEFINE_XEN_GUEST_HANDLE(dom0_getdomaininfolist_t);
  63.509 -
  63.510 -#define DOM0_PLATFORM_QUIRK      39
  63.511 -#define QUIRK_NOIRQBALANCING      1 /* Do not restrict IO-APIC RTE targets */
  63.512 -#define QUIRK_IOAPIC_BAD_REGSEL   2 /* IO-APIC REGSEL forgets its value    */
  63.513 -#define QUIRK_IOAPIC_GOOD_REGSEL  3 /* IO-APIC REGSEL behaves properly     */
  63.514 -struct dom0_platform_quirk {
  63.515 -    /* IN variables. */
  63.516 -    uint32_t quirk_id;
  63.517 -};
  63.518 -typedef struct dom0_platform_quirk dom0_platform_quirk_t;
  63.519 -DEFINE_XEN_GUEST_HANDLE(dom0_platform_quirk_t);
  63.520 -
  63.521 -#define DOM0_PHYSICAL_MEMORY_MAP 40   /* Unimplemented from 3.0.3 onwards */
  63.522 +/* Unsupported legacy operation -- defined for API compatibility. */
  63.523 +#define DOM0_PHYSICAL_MEMORY_MAP 40
  63.524  struct dom0_memory_map_entry {
  63.525      uint64_t start, end;
  63.526      uint32_t flags; /* reserved */
  63.527 @@ -508,135 +71,18 @@ struct dom0_memory_map_entry {
  63.528  typedef struct dom0_memory_map_entry dom0_memory_map_entry_t;
  63.529  DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t);
  63.530  
  63.531 -struct dom0_physical_memory_map {
  63.532 -    /* IN variables. */
  63.533 -    uint32_t max_map_entries;
  63.534 -    /* OUT variables. */
  63.535 -    uint32_t nr_map_entries;
  63.536 -    XEN_GUEST_HANDLE(dom0_memory_map_entry_t) memory_map;
  63.537 -};
  63.538 -typedef struct dom0_physical_memory_map dom0_physical_memory_map_t;
  63.539 -DEFINE_XEN_GUEST_HANDLE(dom0_physical_memory_map_t);
  63.540 -
  63.541 -#define DOM0_MAX_VCPUS 41
  63.542 -struct dom0_max_vcpus {
  63.543 -    domid_t  domain;        /* domain to be affected */
  63.544 -    uint32_t max;           /* maximum number of vcpus */
  63.545 -};
  63.546 -typedef struct dom0_max_vcpus dom0_max_vcpus_t;
  63.547 -DEFINE_XEN_GUEST_HANDLE(dom0_max_vcpus_t);
  63.548 -
  63.549 -#define DOM0_SETDOMAINHANDLE 44
  63.550 -struct dom0_setdomainhandle {
  63.551 -    domid_t domain;
  63.552 -    xen_domain_handle_t handle;
  63.553 -};
  63.554 -typedef struct dom0_setdomainhandle dom0_setdomainhandle_t;
  63.555 -DEFINE_XEN_GUEST_HANDLE(dom0_setdomainhandle_t);
  63.556 -
  63.557 -#define DOM0_SETDEBUGGING 45
  63.558 -struct dom0_setdebugging {
  63.559 -    domid_t domain;
  63.560 -    uint8_t enable;
  63.561 -};
  63.562 -typedef struct dom0_setdebugging dom0_setdebugging_t;
  63.563 -DEFINE_XEN_GUEST_HANDLE(dom0_setdebugging_t);
  63.564 -
  63.565 -#define DOM0_IRQ_PERMISSION 46
  63.566 -struct dom0_irq_permission {
  63.567 -    domid_t domain;          /* domain to be affected */
  63.568 -    uint8_t pirq;
  63.569 -    uint8_t allow_access;    /* flag to specify enable/disable of IRQ access */
  63.570 -};
  63.571 -typedef struct dom0_irq_permission dom0_irq_permission_t;
  63.572 -DEFINE_XEN_GUEST_HANDLE(dom0_irq_permission_t);
  63.573 -
  63.574 -#define DOM0_IOMEM_PERMISSION 47
  63.575 -struct dom0_iomem_permission {
  63.576 -    domid_t  domain;          /* domain to be affected */
  63.577 -    xen_pfn_t first_mfn;      /* first page (physical page number) in range */
  63.578 -    uint64_t nr_mfns;         /* number of pages in range (>0) */
  63.579 -    uint8_t allow_access;     /* allow (!0) or deny (0) access to range? */
  63.580 -};
  63.581 -typedef struct dom0_iomem_permission dom0_iomem_permission_t;
  63.582 -DEFINE_XEN_GUEST_HANDLE(dom0_iomem_permission_t);
  63.583 -
  63.584 -#define DOM0_HYPERCALL_INIT   48
  63.585 -struct dom0_hypercall_init {
  63.586 -    domid_t   domain;          /* domain to be affected */
  63.587 -    xen_pfn_t gmfn;            /* GMFN to be initialised */
  63.588 -};
  63.589 -typedef struct dom0_hypercall_init dom0_hypercall_init_t;
  63.590 -DEFINE_XEN_GUEST_HANDLE(dom0_hypercall_init_t);
  63.591 -
  63.592 -#define DOM0_DOMAIN_SETUP     49
  63.593 -#define _XEN_DOMAINSETUP_hvm_guest 0
  63.594 -#define XEN_DOMAINSETUP_hvm_guest  (1UL<<_XEN_DOMAINSETUP_hvm_guest)
  63.595 -#define _XEN_DOMAINSETUP_query 1	/* Get parameters (for save)  */
  63.596 -#define XEN_DOMAINSETUP_query  (1UL<<_XEN_DOMAINSETUP_query)
  63.597 -typedef struct dom0_domain_setup {
  63.598 -    domid_t  domain;          /* domain to be affected */
  63.599 -    unsigned long flags;      /* XEN_DOMAINSETUP_* */
  63.600 -#ifdef __ia64__
  63.601 -    unsigned long bp;         /* mpaddr of boot param area */
  63.602 -    unsigned long maxmem;	  /* Highest memory address for MDT.  */
  63.603 -    unsigned long xsi_va;     /* Xen shared_info area virtual address.  */
  63.604 -    unsigned int hypercall_imm;	/* Break imm for Xen hypercalls.  */
  63.605 -#endif
  63.606 -} dom0_domain_setup_t;
  63.607 -DEFINE_XEN_GUEST_HANDLE(dom0_domain_setup_t);
  63.608 -
  63.609 -#define DOM0_SETTIMEOFFSET    50
  63.610 -struct dom0_settimeoffset {
  63.611 -    domid_t  domain;
  63.612 -    int32_t  time_offset_seconds; /* applied to domain wallclock time */
  63.613 -};
  63.614 -typedef struct dom0_settimeoffset dom0_settimeoffset_t;
  63.615 -DEFINE_XEN_GUEST_HANDLE(dom0_settimeoffset_t);
  63.616 -
  63.617  struct dom0_op {
  63.618      uint32_t cmd;
  63.619      uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
  63.620      union {
  63.621 -        struct dom0_createdomain      createdomain;
  63.622 -        struct dom0_pausedomain       pausedomain;
  63.623 -        struct dom0_unpausedomain     unpausedomain;
  63.624 -        struct dom0_destroydomain     destroydomain;
  63.625 -        struct dom0_getmemlist        getmemlist;
  63.626 -        struct sched_ctl_cmd          schedctl;
  63.627 -        struct sched_adjdom_cmd       adjustdom;
  63.628 -        struct dom0_setvcpucontext    setvcpucontext;
  63.629 -        struct dom0_getdomaininfo     getdomaininfo;
  63.630 -        struct dom0_getpageframeinfo  getpageframeinfo;
  63.631          struct dom0_msr               msr;
  63.632          struct dom0_settime           settime;
  63.633 -        struct dom0_readconsole       readconsole;
  63.634 -        struct dom0_setvcpuaffinity   setvcpuaffinity;
  63.635 -        struct dom0_tbufcontrol       tbufcontrol;
  63.636 -        struct dom0_physinfo          physinfo;
  63.637 -        struct dom0_sched_id          sched_id;
  63.638 -        struct dom0_shadow_control    shadow_control;
  63.639 -        struct dom0_setdomainmaxmem   setdomainmaxmem;
  63.640 -        struct dom0_getpageframeinfo2 getpageframeinfo2;
  63.641          struct dom0_add_memtype       add_memtype;
  63.642          struct dom0_del_memtype       del_memtype;
  63.643          struct dom0_read_memtype      read_memtype;
  63.644 -        struct dom0_perfccontrol      perfccontrol;
  63.645          struct dom0_microcode         microcode;
  63.646 -        struct dom0_ioport_permission ioport_permission;
  63.647 -        struct dom0_getvcpucontext    getvcpucontext;
  63.648 -        struct dom0_getvcpuinfo       getvcpuinfo;
  63.649 -        struct dom0_getdomaininfolist getdomaininfolist;
  63.650          struct dom0_platform_quirk    platform_quirk;
  63.651 -        struct dom0_physical_memory_map physical_memory_map;
  63.652 -        struct dom0_max_vcpus         max_vcpus;
  63.653 -        struct dom0_setdomainhandle   setdomainhandle;
  63.654 -        struct dom0_setdebugging      setdebugging;
  63.655 -        struct dom0_irq_permission    irq_permission;
  63.656 -        struct dom0_iomem_permission  iomem_permission;
  63.657 -        struct dom0_hypercall_init    hypercall_init;
  63.658 -        struct dom0_domain_setup      domain_setup;
  63.659 -        struct dom0_settimeoffset     settimeoffset;
  63.660 +        struct dom0_memory_map_entry  physical_memory_map;
  63.661          uint8_t                       pad[128];
  63.662      } u;
  63.663  };
    64.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    64.2 +++ b/xen/include/public/domctl.h	Fri Aug 25 18:39:10 2006 +0100
    64.3 @@ -0,0 +1,393 @@
    64.4 +/******************************************************************************
    64.5 + * domctl.h
    64.6 + * 
    64.7 + * Domain management operations. For use by node control stack.
    64.8 + * 
    64.9 + * Copyright (c) 2002-2003, B Dragovic
   64.10 + * Copyright (c) 2002-2006, K Fraser
   64.11 + */
   64.12 +
   64.13 +#ifndef __XEN_PUBLIC_DOMCTL_H__
   64.14 +#define __XEN_PUBLIC_DOMCTL_H__
   64.15 +
   64.16 +#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
   64.17 +#error "domctl operations are intended for use by node control tools only"
   64.18 +#endif
   64.19 +
   64.20 +#include "xen.h"
   64.21 +
   64.22 +#define XEN_DOMCTL_INTERFACE_VERSION 0x00000001
   64.23 +
   64.24 +#define uint64_t uint64_aligned_t
   64.25 +
   64.26 +struct xenctl_cpumap {
   64.27 +    XEN_GUEST_HANDLE_64(uint8_t) bitmap;
   64.28 +    uint32_t nr_cpus;
   64.29 +};
   64.30 +
   64.31 +/*
   64.32 + * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
   64.33 + * If it is specified as zero, an id is auto-allocated and returned.
   64.34 + */
   64.35 +#define XEN_DOMCTL_createdomain       1
   64.36 +struct xen_domctl_createdomain {
   64.37 +    /* IN parameters */
   64.38 +    uint32_t ssidref;
   64.39 +    xen_domain_handle_t handle;
   64.40 +};
   64.41 +typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
   64.42 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
   64.43 +
   64.44 +#define XEN_DOMCTL_destroydomain      2
   64.45 +#define XEN_DOMCTL_pausedomain        3
   64.46 +#define XEN_DOMCTL_unpausedomain      4
   64.47 +
   64.48 +#define XEN_DOMCTL_getdomaininfo      5
   64.49 +struct xen_domctl_getdomaininfo {
   64.50 +    /* OUT variables. */
   64.51 +    domid_t  domain;              /* Also echoed in domctl.domain */
   64.52 +#define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
   64.53 +#define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.             */
   64.54 +#define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
   64.55 +#define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
   64.56 +#define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
   64.57 +#define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.      */
   64.58 +#define DOMFLAGS_CPUSHIFT       8
   64.59 +#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
   64.60 +#define DOMFLAGS_SHUTDOWNSHIFT 16
   64.61 +    uint32_t flags;
   64.62 +    uint64_t tot_pages;
   64.63 +    uint64_t max_pages;
   64.64 +    uint64_t shared_info_frame;  /* MFN of shared_info struct */
   64.65 +    uint64_t cpu_time;
   64.66 +    uint32_t nr_online_vcpus;     /* Number of VCPUs currently online. */
   64.67 +    uint32_t max_vcpu_id;         /* Maximum VCPUID in use by this domain. */
   64.68 +    uint32_t ssidref;
   64.69 +    xen_domain_handle_t handle;
   64.70 +};
   64.71 +typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
   64.72 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
   64.73 +
   64.74 +
   64.75 +#define XEN_DOMCTL_getmemlist         6
   64.76 +struct xen_domctl_getmemlist {
   64.77 +    /* IN variables. */
   64.78 +    uint64_t max_pfns;
   64.79 +    XEN_GUEST_HANDLE_64(ulong) buffer;
   64.80 +    /* OUT variables. */
   64.81 +    uint64_t num_pfns;
   64.82 +};
   64.83 +typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
   64.84 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
   64.85 +
   64.86 +
   64.87 +#define XEN_DOMCTL_getpageframeinfo   7
   64.88 +
   64.89 +#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
   64.90 +#define XEN_DOMCTL_PFINFO_NOTAB   (0x0<<28)
   64.91 +#define XEN_DOMCTL_PFINFO_L1TAB   (0x1<<28)
   64.92 +#define XEN_DOMCTL_PFINFO_L2TAB   (0x2<<28)
   64.93 +#define XEN_DOMCTL_PFINFO_L3TAB   (0x3<<28)
   64.94 +#define XEN_DOMCTL_PFINFO_L4TAB   (0x4<<28)
   64.95 +#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7<<28)
   64.96 +#define XEN_DOMCTL_PFINFO_LPINTAB (0x1<<31)
   64.97 +#define XEN_DOMCTL_PFINFO_XTAB    (0xf<<28) /* invalid page */
   64.98 +#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xf<<28)
   64.99 +
  64.100 +struct xen_domctl_getpageframeinfo {
  64.101 +    /* IN variables. */
  64.102 +    uint64_t gmfn;        /* GMFN to query */
  64.103 +    /* OUT variables. */
  64.104 +    /* Is the page PINNED to a type? */
  64.105 +    uint32_t type;         /* see above type defs */
  64.106 +};
  64.107 +typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
  64.108 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
  64.109 +
  64.110 +
  64.111 +#define XEN_DOMCTL_getpageframeinfo2  8
  64.112 +struct xen_domctl_getpageframeinfo2 {
  64.113 +    /* IN variables. */
  64.114 +    uint64_t num;
  64.115 +    /* IN/OUT variables. */
  64.116 +    XEN_GUEST_HANDLE_64(ulong) array;
  64.117 +};
  64.118 +typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
  64.119 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
  64.120 +
  64.121 +
  64.122 +/*
  64.123 + * Control shadow pagetables operation
  64.124 + */
  64.125 +#define XEN_DOMCTL_shadow_op         10
  64.126 +
  64.127 +/* Disable shadow mode. */
  64.128 +#define XEN_DOMCTL_SHADOW_OP_OFF         0
  64.129 +
  64.130 +/* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */
  64.131 +#define XEN_DOMCTL_SHADOW_OP_ENABLE      32
  64.132 +
  64.133 +/* Log-dirty bitmap operations. */
  64.134 + /* Return the bitmap and clean internal copy for next round. */
  64.135 +#define XEN_DOMCTL_SHADOW_OP_CLEAN       11
  64.136 + /* Return the bitmap but do not modify internal copy. */
  64.137 +#define XEN_DOMCTL_SHADOW_OP_PEEK        12
  64.138 +
  64.139 +/* Memory allocation accessors. */
  64.140 +#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION   30
  64.141 +#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION   31
  64.142 +
  64.143 +/* Legacy enable operations. */
  64.144 + /* Equiv. to ENABLE with no mode flags. */
  64.145 +#define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST       1
  64.146 + /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */
  64.147 +#define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY   2
  64.148 + /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */
  64.149 +#define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE  3
  64.150 +
  64.151 +/* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */
  64.152 + /*
  64.153 +  * Shadow pagetables are refcounted: guest does not use explicit mmu
  64.154 +  * operations nor write-protect its pagetables.
  64.155 +  */
  64.156 +#define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT  (1 << 1)
  64.157 + /*
  64.158 +  * Log pages in a bitmap as they are dirtied.
  64.159 +  * Used for live relocation to determine which pages must be re-sent.
  64.160 +  */
  64.161 +#define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2)
  64.162 + /*
  64.163 +  * Automatically translate GPFNs into MFNs.
  64.164 +  */
  64.165 +#define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3)
  64.166 + /*
  64.167 +  * Xen does not steal virtual address space from the guest.
  64.168 +  * Requires HVM support.
  64.169 +  */
  64.170 +#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL  (1 << 4)
  64.171 +
  64.172 +struct xen_domctl_shadow_op_stats {
  64.173 +    uint32_t fault_count;
  64.174 +    uint32_t dirty_count;
  64.175 +};
  64.176 +typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
  64.177 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
  64.178 +
  64.179 +struct xen_domctl_shadow_op {
  64.180 +    /* IN variables. */
  64.181 +    uint32_t       op;       /* XEN_DOMCTL_SHADOW_OP_* */
  64.182 +
  64.183 +    /* OP_ENABLE */
  64.184 +    uint32_t       mode;     /* XEN_DOMCTL_SHADOW_ENABLE_* */
  64.185 +
  64.186 +    /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
  64.187 +    uint32_t       mb;       /* Shadow memory allocation in MB */
  64.188 +
  64.189 +    /* OP_PEEK / OP_CLEAN */
  64.190 +    XEN_GUEST_HANDLE_64(ulong) dirty_bitmap;
  64.191 +    uint64_t       pages;    /* Size of buffer. Updated with actual size. */
  64.192 +    struct xen_domctl_shadow_op_stats stats;
  64.193 +};
  64.194 +typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
  64.195 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
  64.196 +
  64.197 +
  64.198 +#define XEN_DOMCTL_max_mem           11
  64.199 +struct xen_domctl_max_mem {
  64.200 +    /* IN variables. */
  64.201 +    uint64_t max_memkb;
  64.202 +};
  64.203 +typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
  64.204 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
  64.205 +
  64.206 +
  64.207 +#define XEN_DOMCTL_setvcpucontext    12
  64.208 +#define XEN_DOMCTL_getvcpucontext    13
  64.209 +struct xen_domctl_vcpucontext {
  64.210 +    uint32_t              vcpu;                     /* IN */
  64.211 +    XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
  64.212 +};
  64.213 +typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
  64.214 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
  64.215 +
  64.216 +
  64.217 +#define XEN_DOMCTL_getvcpuinfo       14
  64.218 +struct xen_domctl_getvcpuinfo {
  64.219 +    /* IN variables. */
  64.220 +    uint32_t vcpu;
  64.221 +    /* OUT variables. */
  64.222 +    uint8_t  online;                  /* currently online (not hotplugged)? */
  64.223 +    uint8_t  blocked;                 /* blocked waiting for an event? */
  64.224 +    uint8_t  running;                 /* currently scheduled on its CPU? */
  64.225 +    uint64_t cpu_time;                /* total cpu time consumed (ns) */
  64.226 +    uint32_t cpu;                     /* current mapping   */
  64.227 +};
  64.228 +typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
  64.229 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
  64.230 +
  64.231 +
  64.232 +/* Get/set which physical cpus a vcpu can execute on. */
  64.233 +#define XEN_DOMCTL_setvcpuaffinity    9
  64.234 +#define XEN_DOMCTL_getvcpuaffinity   25
  64.235 +struct xen_domctl_vcpuaffinity {
  64.236 +    uint32_t  vcpu;              /* IN */
  64.237 +    struct xenctl_cpumap cpumap; /* IN/OUT */
  64.238 +};
  64.239 +typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
  64.240 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
  64.241 +
  64.242 +
  64.243 +#define XEN_DOMCTL_max_vcpus         15
  64.244 +struct xen_domctl_max_vcpus {
  64.245 +    uint32_t max;           /* maximum number of vcpus */
  64.246 +};
  64.247 +typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
  64.248 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
  64.249 +
  64.250 +
  64.251 +#define XEN_DOMCTL_scheduler_op      16
  64.252 +/* Scheduler types. */
  64.253 +#define XEN_SCHEDULER_SEDF     4
  64.254 +#define XEN_SCHEDULER_CREDIT   5
  64.255 +/* Set or get info? */
  64.256 +#define XEN_DOMCTL_SCHEDOP_putinfo 0
  64.257 +#define XEN_DOMCTL_SCHEDOP_getinfo 1
  64.258 +struct xen_domctl_scheduler_op {
  64.259 +    uint32_t sched_id;  /* XEN_SCHEDULER_* */
  64.260 +    uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
  64.261 +    union {
  64.262 +        struct xen_domctl_sched_sedf {
  64.263 +            uint64_t period;
  64.264 +            uint64_t slice;
  64.265 +            uint64_t latency;
  64.266 +            uint32_t extratime;
  64.267 +            uint32_t weight;
  64.268 +        } sedf;
  64.269 +        struct xen_domctl_sched_credit {
  64.270 +            uint16_t weight;
  64.271 +            uint16_t cap;
  64.272 +        } credit;
  64.273 +    } u;
  64.274 +};
  64.275 +typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
  64.276 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
  64.277 +
  64.278 +
  64.279 +#define XEN_DOMCTL_setdomainhandle   17
  64.280 +struct xen_domctl_setdomainhandle {
  64.281 +    xen_domain_handle_t handle;
  64.282 +};
  64.283 +typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
  64.284 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
  64.285 +
  64.286 +
  64.287 +#define XEN_DOMCTL_setdebugging      18
  64.288 +struct xen_domctl_setdebugging {
  64.289 +    uint8_t enable;
  64.290 +};
  64.291 +typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
  64.292 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
  64.293 +
  64.294 +
  64.295 +#define XEN_DOMCTL_irq_permission    19
  64.296 +struct xen_domctl_irq_permission {
  64.297 +    uint8_t pirq;
  64.298 +    uint8_t allow_access;    /* flag to specify enable/disable of IRQ access */
  64.299 +};
  64.300 +typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
  64.301 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
  64.302 +
  64.303 +
  64.304 +#define XEN_DOMCTL_iomem_permission  20
  64.305 +struct xen_domctl_iomem_permission {
  64.306 +    uint64_t first_mfn;       /* first page (physical page number) in range */
  64.307 +    uint64_t nr_mfns;         /* number of pages in range (>0) */
  64.308 +    uint8_t  allow_access;    /* allow (!0) or deny (0) access to range? */
  64.309 +};
  64.310 +typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
  64.311 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
  64.312 +
  64.313 +
  64.314 +#define XEN_DOMCTL_ioport_permission 21
  64.315 +struct xen_domctl_ioport_permission {
  64.316 +    uint32_t first_port;              /* first port in range */
  64.317 +    uint32_t nr_ports;                /* size of port range */
  64.318 +    uint8_t  allow_access;            /* allow or deny access to range? */
  64.319 +};
  64.320 +typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
  64.321 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
  64.322 +
  64.323 +#define XEN_DOMCTL_hypercall_init    22
  64.324 +struct xen_domctl_hypercall_init {
  64.325 +    uint64_t  gmfn;            /* GMFN to be initialised */
  64.326 +};
  64.327 +typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
  64.328 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
  64.329 +
  64.330 +#define XEN_DOMCTL_arch_setup        23
  64.331 +#define _XEN_DOMAINSETUP_hvm_guest 0
  64.332 +#define XEN_DOMAINSETUP_hvm_guest  (1UL<<_XEN_DOMAINSETUP_hvm_guest)
  64.333 +#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save)  */
  64.334 +#define XEN_DOMAINSETUP_query  (1UL<<_XEN_DOMAINSETUP_query)
  64.335 +typedef struct xen_domctl_arch_setup {
  64.336 +    uint64_t flags;      /* XEN_DOMAINSETUP_* */
  64.337 +#ifdef __ia64__
  64.338 +    uint64_t bp;            /* mpaddr of boot param area */
  64.339 +    uint64_t maxmem;        /* Highest memory address for MDT.  */
  64.340 +    uint64_t xsi_va;        /* Xen shared_info area virtual address.  */
  64.341 +    uint32_t hypercall_imm; /* Break imm for Xen hypercalls.  */
  64.342 +#endif
  64.343 +} xen_domctl_arch_setup_t;
  64.344 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
  64.345 +
  64.346 +#define XEN_DOMCTL_settimeoffset     24
  64.347 +struct xen_domctl_settimeoffset {
  64.348 +    int32_t  time_offset_seconds; /* applied to domain wallclock time */
  64.349 +};
  64.350 +typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
  64.351 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
  64.352 +
  64.353 +struct xen_domctl {
  64.354 +    uint32_t cmd;
  64.355 +    uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
  64.356 +    domid_t  domain;
  64.357 +    union {
  64.358 +        struct xen_domctl_createdomain      createdomain;
  64.359 +        struct xen_domctl_getdomaininfo     getdomaininfo;
  64.360 +        struct xen_domctl_getmemlist        getmemlist;
  64.361 +        struct xen_domctl_getpageframeinfo  getpageframeinfo;
  64.362 +        struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
  64.363 +        struct xen_domctl_vcpuaffinity      vcpuaffinity;
  64.364 +        struct xen_domctl_shadow_op         shadow_op;
  64.365 +        struct xen_domctl_max_mem           max_mem;
  64.366 +        struct xen_domctl_vcpucontext       vcpucontext;
  64.367 +        struct xen_domctl_getvcpuinfo       getvcpuinfo;
  64.368 +        struct xen_domctl_max_vcpus         max_vcpus;
  64.369 +        struct xen_domctl_scheduler_op      scheduler_op;
  64.370 +        struct xen_domctl_setdomainhandle   setdomainhandle;
  64.371 +        struct xen_domctl_setdebugging      setdebugging;
  64.372 +        struct xen_domctl_irq_permission    irq_permission;
  64.373 +        struct xen_domctl_iomem_permission  iomem_permission;
  64.374 +        struct xen_domctl_ioport_permission ioport_permission;
  64.375 +        struct xen_domctl_hypercall_init    hypercall_init;
  64.376 +        struct xen_domctl_arch_setup        arch_setup;
  64.377 +        struct xen_domctl_settimeoffset     settimeoffset;
  64.378 +        uint8_t                             pad[128];
  64.379 +    } u;
  64.380 +};
  64.381 +typedef struct xen_domctl xen_domctl_t;
  64.382 +DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
  64.383 +
  64.384 +#undef uint64_t
  64.385 +
  64.386 +#endif /* __XEN_PUBLIC_DOMCTL_H__ */
  64.387 +
  64.388 +/*
  64.389 + * Local variables:
  64.390 + * mode: C
  64.391 + * c-set-style: "BSD"
  64.392 + * c-basic-offset: 4
  64.393 + * tab-width: 4
  64.394 + * indent-tabs-mode: nil
  64.395 + * End:
  64.396 + */
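
The command-code-plus-union pattern above is how every domctl is issued: a tool fills in cmd, interface_version and domain, plus the matching member of u. A minimal sketch (not part of this changeset) of a caller, assuming a do_domctl()-style wrapper such as libxenctrl's private helper and an installed <xen/domctl.h> header path:

    /* Query basic information about one domain via XEN_DOMCTL_getdomaininfo. */
    #define __XEN_TOOLS__ 1           /* domctl.h refuses non-tool builds */
    #include <string.h>
    #include <xen/domctl.h>           /* install path is an assumption */

    extern int do_domctl(int xc_handle, struct xen_domctl *domctl); /* assumed wrapper */

    static int get_domaininfo(int xc_handle, domid_t dom,
                              xen_domctl_getdomaininfo_t *info)
    {
        struct xen_domctl domctl;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd = XEN_DOMCTL_getdomaininfo;
        domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        domctl.domain = dom;                     /* IN: domain to query */

        if ( do_domctl(xc_handle, &domctl) != 0 )
            return -1;

        *info = domctl.u.getdomaininfo;          /* OUT variables */
        return 0;
    }
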
    65.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    65.2 +++ b/xen/include/public/platform.h	Fri Aug 25 18:39:10 2006 +0100
    65.3 @@ -0,0 +1,125 @@
    65.4 +/******************************************************************************
    65.5 + * platform.h
    65.6 + * 
    65.7 + * Hardware platform operations. Intended for use by domain-0 kernel.
    65.8 + * 
    65.9 + * Copyright (c) 2002-2006, K Fraser
   65.10 + */
   65.11 +
   65.12 +#ifndef __XEN_PUBLIC_PLATFORM_H__
   65.13 +#define __XEN_PUBLIC_PLATFORM_H__
   65.14 +
   65.15 +#include "xen.h"
   65.16 +
   65.17 +#define XENPF_INTERFACE_VERSION 0x03000001
   65.18 +
   65.19 +/*
   65.20 + * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
   65.21 + * 1 January, 1970 if the current system time was <system_time>.
   65.22 + */
   65.23 +#define XENPF_settime             17
   65.24 +struct xenpf_settime {
   65.25 +    /* IN variables. */
   65.26 +    uint32_t secs;
   65.27 +    uint32_t nsecs;
   65.28 +    uint64_t system_time;
   65.29 +};
   65.30 +typedef struct xenpf_settime xenpf_settime_t;
   65.31 +DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
   65.32 +
   65.33 +/*
   65.34 + * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
   65.35 + * On x86, @type is an architecture-defined MTRR memory type.
   65.36 + * On success, returns the MTRR that was used (@reg) and a handle that can
   65.37 + * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting.
   65.38 + * (x86-specific).
   65.39 + */
   65.40 +#define XENPF_add_memtype         31
   65.41 +struct xenpf_add_memtype {
   65.42 +    /* IN variables. */
   65.43 +    xen_pfn_t mfn;
   65.44 +    uint64_t nr_mfns;
   65.45 +    uint32_t type;
   65.46 +    /* OUT variables. */
   65.47 +    uint32_t handle;
   65.48 +    uint32_t reg;
   65.49 +};
   65.50 +typedef struct xenpf_add_memtype xenpf_add_memtype_t;
   65.51 +DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t);
   65.52 +
   65.53 +/*
   65.54 + * Tear down an existing memory-range type. If @handle is remembered then it
   65.55 + * should be passed in to accurately tear down the correct setting (in case
   65.56 + * of overlapping memory regions with differing types). If it is not known
   65.57 + * then @handle should be set to zero. In all cases @reg must be set.
   65.58 + * (x86-specific).
   65.59 + */
   65.60 +#define XENPF_del_memtype         32
   65.61 +struct xenpf_del_memtype {
   65.62 +    /* IN variables. */
   65.63 +    uint32_t handle;
   65.64 +    uint32_t reg;
   65.65 +};
   65.66 +typedef struct xenpf_del_memtype xenpf_del_memtype_t;
   65.67 +DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t);
   65.68 +
   65.69 +/* Read current type of an MTRR (x86-specific). */
   65.70 +#define XENPF_read_memtype        33
   65.71 +struct xenpf_read_memtype {
   65.72 +    /* IN variables. */
   65.73 +    uint32_t reg;
   65.74 +    /* OUT variables. */
   65.75 +    xen_pfn_t mfn;
   65.76 +    uint64_t nr_mfns;
   65.77 +    uint32_t type;
   65.78 +};
   65.79 +typedef struct xenpf_read_memtype xenpf_read_memtype_t;
   65.80 +DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t);
   65.81 +
   65.82 +#define XENPF_microcode_update    35
   65.83 +struct xenpf_microcode_update {
   65.84 +    /* IN variables. */
   65.85 +    XEN_GUEST_HANDLE(void) data;      /* Pointer to microcode data */
   65.86 +    uint32_t length;                  /* Length of microcode data. */
   65.87 +};
   65.88 +typedef struct xenpf_microcode_update xenpf_microcode_update_t;
   65.89 +DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t);
   65.90 +
   65.91 +#define XENPF_platform_quirk      39
   65.92 +#define QUIRK_NOIRQBALANCING      1 /* Do not restrict IO-APIC RTE targets */
   65.93 +#define QUIRK_IOAPIC_BAD_REGSEL   2 /* IO-APIC REGSEL forgets its value    */
   65.94 +#define QUIRK_IOAPIC_GOOD_REGSEL  3 /* IO-APIC REGSEL behaves properly     */
   65.95 +struct xenpf_platform_quirk {
   65.96 +    /* IN variables. */
   65.97 +    uint32_t quirk_id;
   65.98 +};
   65.99 +typedef struct xenpf_platform_quirk xenpf_platform_quirk_t;
  65.100 +DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
  65.101 +
  65.102 +struct xen_platform_op {
  65.103 +    uint32_t cmd;
  65.104 +    uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
  65.105 +    union {
  65.106 +        struct xenpf_settime           settime;
  65.107 +        struct xenpf_add_memtype       add_memtype;
  65.108 +        struct xenpf_del_memtype       del_memtype;
  65.109 +        struct xenpf_read_memtype      read_memtype;
  65.110 +        struct xenpf_microcode_update  microcode;
  65.111 +        struct xenpf_platform_quirk    platform_quirk;
  65.112 +        uint8_t                        pad[128];
  65.113 +    } u;
  65.114 +};
  65.115 +typedef struct xen_platform_op xen_platform_op_t;
  65.116 +DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t);
  65.117 +
  65.118 +#endif /* __XEN_PUBLIC_PLATFORM_H__ */
  65.119 +
  65.120 +/*
  65.121 + * Local variables:
  65.122 + * mode: C
  65.123 + * c-set-style: "BSD"
  65.124 + * c-basic-offset: 4
  65.125 + * tab-width: 4
  65.126 + * indent-tabs-mode: nil
  65.127 + * End:
  65.128 + */
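
platform.h keeps only the operations a dom0 kernel performs on the hardware; the management tools no longer see them. A minimal dom0-side sketch (the HYPERVISOR_platform_op() wrapper name and header path are assumptions, not part of this changeset), programming the wallclock with XENPF_settime:

    #include <xen/interface/platform.h>   /* assumed kernel-side install path */

    /* Declared here for the sketch; a real dom0 kernel provides this wrapper. */
    extern int HYPERVISOR_platform_op(struct xen_platform_op *op);

    static int xen_set_wallclock(uint32_t secs, uint32_t nsecs,
                                 uint64_t system_time)
    {
        struct xen_platform_op op;

        op.cmd = XENPF_settime;
        op.interface_version = XENPF_INTERFACE_VERSION;
        op.u.settime.secs        = secs;          /* seconds since the epoch */
        op.u.settime.nsecs       = nsecs;
        op.u.settime.system_time = system_time;   /* current Xen system time (ns) */

        return HYPERVISOR_platform_op(&op);
    }
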
    66.1 --- a/xen/include/public/sched_ctl.h	Fri Aug 25 10:39:24 2006 +0100
    66.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    66.3 @@ -1,56 +0,0 @@
    66.4 -/******************************************************************************
    66.5 - * Generic scheduler control interface.
    66.6 - *
    66.7 - * Mark Williamson, (C) 2004 Intel Research Cambridge
    66.8 - */
    66.9 -
   66.10 -#ifndef __XEN_PUBLIC_SCHED_CTL_H__
   66.11 -#define __XEN_PUBLIC_SCHED_CTL_H__
   66.12 -
   66.13 -/* Scheduler types. */
   66.14 -#define SCHED_SEDF     4
   66.15 -#define SCHED_CREDIT   5
   66.16 -
   66.17 -/* Set or get info? */
   66.18 -#define SCHED_INFO_PUT 0
   66.19 -#define SCHED_INFO_GET 1
   66.20 -
   66.21 -/*
   66.22 - * Generic scheduler control command - used to adjust system-wide scheduler
   66.23 - * parameters
   66.24 - */
   66.25 -struct sched_ctl_cmd {
   66.26 -    uint32_t sched_id;
   66.27 -    uint32_t direction;
   66.28 -};
   66.29 -
   66.30 -struct sched_adjdom_cmd {
   66.31 -    uint32_t sched_id;
   66.32 -    uint32_t direction;
   66.33 -    domid_t  domain;
   66.34 -    union {
   66.35 -        struct sedf_adjdom {
   66.36 -            uint64_t period;
   66.37 -            uint64_t slice;
   66.38 -            uint64_t latency;
   66.39 -            uint32_t extratime;
   66.40 -            uint32_t weight;
   66.41 -        } sedf;
   66.42 -        struct sched_credit_adjdom {
   66.43 -            uint16_t weight;
   66.44 -            uint16_t cap;
   66.45 -        } credit;
   66.46 -    } u;
   66.47 -};
   66.48 -
   66.49 -#endif /* __XEN_PUBLIC_SCHED_CTL_H__ */
   66.50 -
   66.51 -/*
   66.52 - * Local variables:
   66.53 - * mode: C
   66.54 - * c-set-style: "BSD"
   66.55 - * c-basic-offset: 4
   66.56 - * tab-width: 4
   66.57 - * indent-tabs-mode: nil
   66.58 - * End:
   66.59 - */
    67.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    67.2 +++ b/xen/include/public/sysctl.h	Fri Aug 25 18:39:10 2006 +0100
    67.3 @@ -0,0 +1,155 @@
    67.4 +/******************************************************************************
    67.5 + * sysctl.h
    67.6 + * 
    67.7 + * System management operations. For use by node control stack.
    67.8 + * 
    67.9 + * Copyright (c) 2002-2006, K Fraser
   67.10 + */
   67.11 +
   67.12 +#ifndef __XEN_PUBLIC_SYSCTL_H__
   67.13 +#define __XEN_PUBLIC_SYSCTL_H__
   67.14 +
   67.15 +#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
   67.16 +#error "sysctl operations are intended for use by node control tools only"
   67.17 +#endif
   67.18 +
   67.19 +#include "xen.h"
   67.20 +#include "domctl.h"
   67.21 +
   67.22 +#define XEN_SYSCTL_INTERFACE_VERSION 0x00000001
   67.23 +
   67.24 +#define uint64_t uint64_aligned_t
   67.25 +
   67.26 +/*
   67.27 + * Read console content from Xen buffer ring.
   67.28 + */
   67.29 +#define XEN_SYSCTL_readconsole       1
   67.30 +struct xen_sysctl_readconsole {
   67.31 +    /* IN variables. */
   67.32 +    uint32_t clear;                   /* Non-zero -> clear after reading. */
   67.33 +    XEN_GUEST_HANDLE_64(char) buffer; /* Buffer start */
   67.34 +    /* IN/OUT variables. */
   67.35 +    uint32_t count;            /* In: Buffer size;  Out: Used buffer size  */
   67.36 +};
   67.37 +typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
   67.38 +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
   67.39 +
   67.40 +/* Get trace buffers machine base address */
   67.41 +#define XEN_SYSCTL_tbuf_op           2
   67.42 +struct xen_sysctl_tbuf_op {
   67.43 +    /* IN variables */
   67.44 +#define XEN_SYSCTL_TBUFOP_get_info     0
   67.45 +#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1
   67.46 +#define XEN_SYSCTL_TBUFOP_set_evt_mask 2
   67.47 +#define XEN_SYSCTL_TBUFOP_set_size     3
   67.48 +#define XEN_SYSCTL_TBUFOP_enable       4
   67.49 +#define XEN_SYSCTL_TBUFOP_disable      5
   67.50 +    uint32_t cmd;
   67.51 +    /* IN/OUT variables */
   67.52 +    struct xenctl_cpumap cpu_mask;
   67.53 +    uint32_t             evt_mask;
   67.54 +    /* OUT variables */
   67.55 +    uint64_t buffer_mfn;
   67.56 +    uint32_t size;
   67.57 +};
   67.58 +typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
   67.59 +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
   67.60 +
   67.61 +/*
   67.62 + * Get physical information about the host machine
   67.63 + */
   67.64 +#define XEN_SYSCTL_physinfo          3
   67.65 +struct xen_sysctl_physinfo {
   67.66 +    uint32_t threads_per_core;
   67.67 +    uint32_t cores_per_socket;
   67.68 +    uint32_t sockets_per_node;
   67.69 +    uint32_t nr_nodes;
   67.70 +    uint32_t cpu_khz;
   67.71 +    uint64_t total_pages;
   67.72 +    uint64_t free_pages;
   67.73 +    uint64_t scrub_pages;
   67.74 +    uint32_t hw_cap[8];
   67.75 +};
   67.76 +typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
   67.77 +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
   67.78 +
   67.79 +/*
   67.80 + * Get the ID of the current scheduler.
   67.81 + */
   67.82 +#define XEN_SYSCTL_sched_id          4
   67.83 +struct xen_sysctl_sched_id {
   67.84 +    /* OUT variable */
   67.85 +    uint32_t sched_id;
   67.86 +};
   67.87 +typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
   67.88 +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
   67.89 +
   67.90 +/* Interface for controlling Xen software performance counters. */
   67.91 +#define XEN_SYSCTL_perfc_op          5
   67.92 +/* Sub-operations: */
   67.93 +#define XEN_SYSCTL_PERFCOP_reset 1   /* Reset all counters to zero. */
   67.94 +#define XEN_SYSCTL_PERFCOP_query 2   /* Get perfctr information. */
   67.95 +struct xen_sysctl_perfc_desc {
   67.96 +    char         name[80];             /* name of perf counter */
   67.97 +    uint32_t     nr_vals;              /* number of values for this counter */
   67.98 +};
   67.99 +typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
  67.100 +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
  67.101 +typedef uint32_t xen_sysctl_perfc_val_t;
  67.102 +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);
  67.103 +
  67.104 +struct xen_sysctl_perfc_op {
  67.105 +    /* IN variables. */
   67.106 +    uint32_t       cmd;                /* XEN_SYSCTL_PERFCOP_* */
   67.107 +    /* OUT variables. */
   67.108 +    uint32_t       nr_counters;        /* number of counter descriptions */
   67.109 +    uint32_t       nr_vals;            /* number of values */
  67.110 +    /* counter information (or NULL) */
  67.111 +    XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
  67.112 +    /* counter values (or NULL) */
  67.113 +    XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
  67.114 +};
  67.115 +typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
  67.116 +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
  67.117 +
  67.118 +#define XEN_SYSCTL_getdomaininfolist 6
  67.119 +struct xen_sysctl_getdomaininfolist {
  67.120 +    /* IN variables. */
  67.121 +    domid_t               first_domain;
  67.122 +    uint32_t              max_domains;
  67.123 +    XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
  67.124 +    /* OUT variables. */
  67.125 +    uint32_t              num_domains;
  67.126 +};
  67.127 +typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
  67.128 +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
  67.129 +
  67.130 +struct xen_sysctl {
  67.131 +    uint32_t cmd;
  67.132 +    uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
  67.133 +    union {
  67.134 +        struct xen_sysctl_readconsole       readconsole;
  67.135 +        struct xen_sysctl_tbuf_op           tbuf_op;
  67.136 +        struct xen_sysctl_physinfo          physinfo;
  67.137 +        struct xen_sysctl_sched_id          sched_id;
  67.138 +        struct xen_sysctl_perfc_op          perfc_op;
  67.139 +        struct xen_sysctl_getdomaininfolist getdomaininfolist;
  67.140 +        uint8_t                             pad[128];
  67.141 +    } u;
  67.142 +};
  67.143 +typedef struct xen_sysctl xen_sysctl_t;
  67.144 +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
  67.145 +
  67.146 +#undef uint64_t
  67.147 +
  67.148 +#endif /* __XEN_PUBLIC_SYSCTL_H__ */
  67.149 +
  67.150 +/*
  67.151 + * Local variables:
  67.152 + * mode: C
  67.153 + * c-set-style: "BSD"
  67.154 + * c-basic-offset: 4
  67.155 + * tab-width: 4
  67.156 + * indent-tabs-mode: nil
  67.157 + * End:
  67.158 + */
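
The sysctl interface mirrors domctl but carries system-wide queries; getdomaininfolist reuses the domctl getdomaininfo structure, which is why sysctl.h includes domctl.h. A matching tool-side sketch (again assuming a libxenctrl-style do_sysctl() wrapper; not part of this changeset):

    #define __XEN_TOOLS__ 1           /* sysctl.h refuses non-tool builds */
    #include <string.h>
    #include <xen/sysctl.h>           /* install path is an assumption */

    extern int do_sysctl(int xc_handle, struct xen_sysctl *sysctl); /* assumed wrapper */

    static int get_physinfo(int xc_handle, xen_sysctl_physinfo_t *info)
    {
        struct xen_sysctl sysctl;

        memset(&sysctl, 0, sizeof(sysctl));
        sysctl.cmd = XEN_SYSCTL_physinfo;
        sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;

        if ( do_sysctl(xc_handle, &sysctl) != 0 )
            return -1;

        *info = sysctl.u.physinfo;               /* OUT variables */
        return 0;
    }
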
    68.1 --- a/xen/include/public/xen-compat.h	Fri Aug 25 10:39:24 2006 +0100
    68.2 +++ b/xen/include/public/xen-compat.h	Fri Aug 25 18:39:10 2006 +0100
    68.3 @@ -9,7 +9,7 @@
    68.4  #ifndef __XEN_PUBLIC_XEN_COMPAT_H__
    68.5  #define __XEN_PUBLIC_XEN_COMPAT_H__
    68.6  
    68.7 -#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030203
    68.8 +#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030204
    68.9  
   68.10  #if defined(__XEN__) || defined(__XEN_TOOLS__)
   68.11  /* Xen is built with matching headers and implements the latest interface. */
    69.1 --- a/xen/include/public/xen.h	Fri Aug 25 10:39:24 2006 +0100
    69.2 +++ b/xen/include/public/xen.h	Fri Aug 25 18:39:10 2006 +0100
    69.3 @@ -34,7 +34,7 @@
    69.4  #define __HYPERVISOR_set_callbacks         4
    69.5  #define __HYPERVISOR_fpu_taskswitch        5
    69.6  #define __HYPERVISOR_sched_op_compat       6 /* compat since 0x00030101 */
    69.7 -#define __HYPERVISOR_dom0_op               7
    69.8 +#define __HYPERVISOR_platform_op           7
    69.9  #define __HYPERVISOR_set_debugreg          8
   69.10  #define __HYPERVISOR_get_debugreg          9
   69.11  #define __HYPERVISOR_update_descriptor    10
   69.12 @@ -61,6 +61,8 @@
   69.13  #define __HYPERVISOR_event_channel_op     32
   69.14  #define __HYPERVISOR_physdev_op           33
   69.15  #define __HYPERVISOR_hvm_op               34
   69.16 +#define __HYPERVISOR_sysctl               35
   69.17 +#define __HYPERVISOR_domctl               36
   69.18  
   69.19  /* Architecture-specific hypercall definitions. */
   69.20  #define __HYPERVISOR_arch_0               48
   69.21 @@ -90,6 +92,11 @@
   69.22  #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
   69.23  #endif
   69.24  
   69.25 +/* New platform_op hypercall introduced in 0x00030204. */
   69.26 +#if __XEN_INTERFACE_VERSION__ < 0x00030204
   69.27 +#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
   69.28 +#endif
   69.29 +
   69.30  /* 
   69.31   * VIRTUAL INTERRUPTS
   69.32   * 
   69.33 @@ -530,14 +537,17 @@ typedef struct dom0_vga_console_info {
   69.34      uint8_t rsvd_size;
   69.35  } dom0_vga_console_info_t;
   69.36  
   69.37 -typedef uint64_t cpumap_t;
   69.38 -
   69.39  typedef uint8_t xen_domain_handle_t[16];
   69.40  
   69.41  /* Turn a plain number into a C unsigned long constant. */
   69.42  #define __mk_unsigned_long(x) x ## UL
   69.43  #define mk_unsigned_long(x) __mk_unsigned_long(x)
   69.44  
   69.45 +DEFINE_XEN_GUEST_HANDLE(uint8_t);
   69.46 +DEFINE_XEN_GUEST_HANDLE(uint16_t);
   69.47 +DEFINE_XEN_GUEST_HANDLE(uint32_t);
   69.48 +DEFINE_XEN_GUEST_HANDLE(uint64_t);
   69.49 +
   69.50  #else /* __ASSEMBLY__ */
   69.51  
   69.52  /* In assembly code we cannot use C numeric constant suffixes. */
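
Taken together, the xen.h changes keep old consumers building: hypercall slot 7 is renamed rather than reused for something else, and the compatibility #define restores the old name for code compiled against an interface version before 0x00030204. A hedged illustration (the header path is an assumption):

    #define __XEN_INTERFACE_VERSION__ 0x00030203   /* pre-platform_op consumer */
    #include <xen/xen.h>

    /* The old name now expands to the platform_op slot, so this holds. */
    #if __HYPERVISOR_dom0_op != __HYPERVISOR_platform_op
    #error "dom0_op no longer aliases hypercall slot 7"
    #endif
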
    70.1 --- a/xen/include/xen/cpumask.h	Fri Aug 25 10:39:24 2006 +0100
    70.2 +++ b/xen/include/xen/cpumask.h	Fri Aug 25 18:39:10 2006 +0100
    70.3 @@ -379,4 +379,11 @@ extern cpumask_t cpu_present_map;
    70.4  #define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
    70.5  #define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
    70.6  
    70.7 +/* Copy to/from cpumap provided by control tools. */
    70.8 +struct xenctl_cpumap;
    70.9 +void cpumask_to_xenctl_cpumap(
    70.10 +    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask);
    70.11 +void xenctl_cpumap_to_cpumask(
    70.12 +    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap);
   70.13 +
   70.14  #endif /* __XEN_CPUMASK_H */
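
xenctl_cpumap (declared in domctl.h) replaces the fixed-width cpumap_t dropped from xen.h above for passing CPU bitmaps between tools and Xen; these helpers convert to and from Xen's internal cpumask_t. A hedged sketch of a caller inside Xen (the handler shape and the vcpu field name are assumptions, not this changeset's code):

    #include <xen/cpumask.h>
    #include <xen/sched.h>
    #include <public/domctl.h>

    /* Return a vcpu's affinity through the tool-provided bitmap,
     * roughly what a XEN_DOMCTL_getvcpuaffinity handler would do. */
    static long getvcpuaffinity(struct vcpu *v,
                                struct xen_domctl_vcpuaffinity *aff)
    {
        cpumask_to_xenctl_cpumap(&aff->cpumap, &v->cpu_affinity);
        return 0;
    }
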
    71.1 --- a/xen/include/xen/hypercall.h	Fri Aug 25 10:39:24 2006 +0100
    71.2 +++ b/xen/include/xen/hypercall.h	Fri Aug 25 18:39:10 2006 +0100
    71.3 @@ -9,7 +9,9 @@
    71.4  #include <xen/types.h>
    71.5  #include <xen/time.h>
    71.6  #include <public/xen.h>
    71.7 -#include <public/dom0_ops.h>
    71.8 +#include <public/domctl.h>
    71.9 +#include <public/sysctl.h>
   71.10 +#include <public/platform.h>
   71.11  #include <public/acm_ops.h>
   71.12  #include <public/event_channel.h>
   71.13  #include <asm/hypercall.h>
   71.14 @@ -29,8 +31,16 @@ do_sched_op(
   71.15      XEN_GUEST_HANDLE(void) arg);
   71.16  
   71.17  extern long
   71.18 -do_dom0_op(
   71.19 -    XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op);
   71.20 +do_domctl(
   71.21 +    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
   71.22 +
   71.23 +extern long
   71.24 +do_sysctl(
   71.25 +    XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl);
   71.26 +
   71.27 +extern long
   71.28 +do_platform_op(
   71.29 +    XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op);
   71.30  
   71.31  extern long
   71.32  do_memory_op(
    72.1 --- a/xen/include/xen/sched-if.h	Fri Aug 25 10:39:24 2006 +0100
    72.2 +++ b/xen/include/xen/sched-if.h	Fri Aug 25 18:39:10 2006 +0100
    72.3 @@ -73,9 +73,8 @@ struct scheduler {
    72.4  
    72.5      struct task_slice (*do_schedule) (s_time_t);
    72.6  
    72.7 -    int          (*control)        (struct sched_ctl_cmd *);
    72.8 -    int          (*adjdom)         (struct domain *,
    72.9 -                                    struct sched_adjdom_cmd *);
   72.10 +    int          (*adjust)         (struct domain *,
   72.11 +                                    struct xen_domctl_scheduler_op *);
   72.12      void         (*dump_settings)  (void);
   72.13      void         (*dump_cpu_state) (int);
   72.14  };
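
The per-scheduler control/adjdom pair collapses into a single adjust hook taking the new domctl scheduler_op directly. A hedged sketch (not this changeset's exact code) of how the common layer might dispatch to it; 'ops' and SCHED_OP() stand in for schedule.c's active-scheduler instance and its usual hook-dispatch macro:

    #include <xen/errno.h>
    #include <xen/sched.h>
    #include <xen/sched-if.h>

    long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
    {
        if ( (op->sched_id != ops.sched_id) ||
             ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
              (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
            return -EINVAL;

        /* e.g. the credit scheduler's hook reads or writes weight/cap. */
        return SCHED_OP(adjust, d, op);
    }
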
    73.1 --- a/xen/include/xen/sched.h	Fri Aug 25 10:39:24 2006 +0100
    73.2 +++ b/xen/include/xen/sched.h	Fri Aug 25 18:39:10 2006 +0100
    73.3 @@ -7,7 +7,7 @@
    73.4  #include <xen/spinlock.h>
    73.5  #include <xen/smp.h>
    73.6  #include <public/xen.h>
    73.7 -#include <public/dom0_ops.h>
    73.8 +#include <public/domctl.h>
    73.9  #include <public/vcpu.h>
   73.10  #include <xen/time.h>
   73.11  #include <xen/timer.h>
   73.12 @@ -243,7 +243,7 @@ extern int construct_dom0(
   73.13      unsigned long image_start, unsigned long image_len, 
   73.14      unsigned long initrd_start, unsigned long initrd_len,
   73.15      char *cmdline);
   73.16 -extern int set_info_guest(struct domain *d, dom0_setvcpucontext_t *);
   73.17 +extern int set_info_guest(struct domain *d, xen_domctl_vcpucontext_t *);
   73.18  
   73.19  struct domain *find_domain_by_id(domid_t dom);
   73.20  extern void domain_destroy(struct domain *d);
   73.21 @@ -282,8 +282,7 @@ void scheduler_init(void);
   73.22  void schedulers_start(void);
   73.23  int  sched_init_vcpu(struct vcpu *);
   73.24  void sched_destroy_domain(struct domain *);
   73.25 -long sched_ctl(struct sched_ctl_cmd *);
   73.26 -long sched_adjdom(struct sched_adjdom_cmd *);
   73.27 +long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
   73.28  int  sched_id(void);
   73.29  void vcpu_wake(struct vcpu *d);
   73.30  void vcpu_sleep_nosync(struct vcpu *d);
    74.1 --- a/xen/include/xen/trace.h	Fri Aug 25 10:39:24 2006 +0100
    74.2 +++ b/xen/include/xen/trace.h	Fri Aug 25 18:39:10 2006 +0100
    74.3 @@ -16,15 +16,13 @@
    74.4   *
    74.5   * Access to the trace buffers is via a dom0 hypervisor op and analysis of
    74.6   * trace buffer contents can then be performed using a userland tool.
    74.7 - *
    74.8 - * See also common/trace.c and the dom0 op in include/public/dom0_ops.h
    74.9   */
   74.10  
   74.11  #ifndef __XEN_TRACE_H__
   74.12  #define __XEN_TRACE_H__
   74.13  
   74.14  #include <xen/config.h>
   74.15 -#include <public/dom0_ops.h>
   74.16 +#include <public/sysctl.h>
   74.17  #include <public/trace.h>
   74.18  
   74.19  extern int tb_init_done;
   74.20 @@ -33,7 +31,7 @@ extern int tb_init_done;
   74.21  void init_trace_bufs(void);
   74.22  
   74.23  /* used to retrieve the physical address of the trace buffers */
   74.24 -int tb_control(dom0_tbufcontrol_t *tbc);
   74.25 +int tb_control(struct xen_sysctl_tbuf_op *tbc);
   74.26  
   74.27  void trace(u32 event, unsigned long d1, unsigned long d2,
   74.28             unsigned long d3, unsigned long d4, unsigned long d5);