}
break;
+ case DOM0_IOPL:
+ {
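+        /* Set the I/O privilege level for the specified domain. */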
+ extern long do_iopl(domid_t, unsigned int);
+ ret = do_iopl(op->u.iopl.domain, op->u.iopl.iopl);
+ }
+ break;
+
+ case DOM0_PHYSINFO:
+ {
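+        /* Report physical host details: CPU topology, page counts, clock rate. */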
+ dom0_physinfo_t *pi = &op->u.physinfo;
+
+ pi->ht_per_core = opt_noht ? 1 : ht_per_core;
+ pi->cores = smp_num_cpus / pi->ht_per_core;
+ pi->total_pages = max_page;
+ pi->free_pages = avail_domheap_pages();
+ pi->cpu_khz = cpu_khz;
+
+ copy_to_user(u_dom0_op, op, sizeof(*op));
+ ret = 0;
+ }
+ break;
+
+ case DOM0_GETPAGEFRAMEINFO:
+ {
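+        /* Look up the page-table type (if any) of one frame owned by a domain. */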
+ struct pfn_info *page;
+ unsigned long pfn = op->u.getpageframeinfo.pfn;
+ domid_t dom = op->u.getpageframeinfo.domain;
+ struct domain *d;
+
+ ret = -EINVAL;
+
+ if ( unlikely(pfn >= max_page) ||
+ unlikely((d = find_domain_by_id(dom)) == NULL) )
+ break;
+
+ page = &frame_table[pfn];
+
+ if ( likely(get_page(page, d)) )
+ {
+ ret = 0;
+
+ op->u.getpageframeinfo.type = NOTAB;
+
+ if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
+ {
+ switch ( page->u.inuse.type_info & PGT_type_mask )
+ {
+ case PGT_l1_page_table:
+ op->u.getpageframeinfo.type = L1TAB;
+ break;
+ case PGT_l2_page_table:
+ op->u.getpageframeinfo.type = L2TAB;
+ break;
+ case PGT_l3_page_table:
+ op->u.getpageframeinfo.type = L3TAB;
+ break;
+ case PGT_l4_page_table:
+ op->u.getpageframeinfo.type = L4TAB;
+ break;
+ }
+ }
+
+ put_page(page);
+ }
+
+ put_domain(d);
+
+ copy_to_user(u_dom0_op, op, sizeof(*op));
+ }
+ break;
+
+ case DOM0_GETPAGEFRAMEINFO2:
+ {
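+        /* Batched variant: tag an array of MFNs with their page-table types. */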
+#define GPF2_BATCH 128
+        int n, j;
+ int num = op->u.getpageframeinfo2.num;
+ domid_t dom = op->u.getpageframeinfo2.domain;
+ unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
+ struct domain *d;
+ unsigned long l_arr[GPF2_BATCH];
+ ret = -ESRCH;
+
+ if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
+ break;
+
+        if ( unlikely(num > 1024) )
+        {
+            ret = -E2BIG;
+            put_domain(d);
+            break;
+        }
+
+ ret = 0;
+        for ( n = 0; n < num; )
+        {
+            int k = ((num - n) > GPF2_BATCH) ? GPF2_BATCH : (num - n);
+
+ if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+            for ( j = 0; j < k; j++ )
+ {
+ struct pfn_info *page;
+ unsigned long mfn = l_arr[j];
+
+ if ( unlikely(mfn >= max_page) )
+ goto e2_err;
+
+ page = &frame_table[mfn];
+
+ if ( likely(get_page(page, d)) )
+ {
+ unsigned long type = 0;
+
+                switch ( page->u.inuse.type_info & PGT_type_mask )
+ {
+ case PGT_l1_page_table:
+ type = L1TAB;
+ break;
+ case PGT_l2_page_table:
+ type = L2TAB;
+ break;
+ case PGT_l3_page_table:
+ type = L3TAB;
+ break;
+ case PGT_l4_page_table:
+ type = L4TAB;
+ break;
+ }
+
+ if ( page->u.inuse.type_info & PGT_pinned )
+ type |= LPINTAB;
+ l_arr[j] |= type;
+ put_page(page);
+ }
+ else
+ {
+ e2_err:
+ l_arr[j] |= XTAB;
+ }
+
+ }
+
+ if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+ n += j;
+ }
+
+ put_domain(d);
+ }
+ break;
+
default:
ret = -ENOSYS;
}
+void dump_pageframe_info(struct domain *d)
+{
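+    /* Dump count/type info for each page of a small domain, plus shared_info. */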
+ struct pfn_info *page;
+ struct list_head *ent;
+
+ if ( d->tot_pages < 10 )
+ {
+ list_for_each ( ent, &d->page_list )
+ {
+ page = list_entry(ent, struct pfn_info, list);
+ printk("Page %08x: caf=%08x, taf=%08x\n",
+ page_to_phys(page), page->count_info,
+ page->u.inuse.type_info);
+ }
+ }
+
+ page = virt_to_page(d->shared_info);
+ printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
+ page_to_phys(page), page->count_info,
+ page->u.inuse.type_info);
+}
+
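+/* Slab cache from which x86 'struct domain' structures are allocated. */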
+xmem_cache_t *domain_struct_cachep;
+void __init domain_startofday(void)
+{
+ domain_struct_cachep = xmem_cache_create(
+ "domain_cache", sizeof(struct domain),
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if ( domain_struct_cachep == NULL )
+ panic("No slab cache for domain structs.");
+}
+
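+/* Arch hooks used by common code to allocate and free domain structures. */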
+struct domain *arch_alloc_domain_struct(void)
+{
+ return xmem_cache_alloc(domain_struct_cachep);
+}
+
+void arch_free_domain_struct(struct domain *d)
+{
+ xmem_cache_free(domain_struct_cachep, d);
+}
+
void free_perdomain_pt(struct domain *d)
{
free_xenheap_page((unsigned long)d->mm.perdomain_pt);
u32 type_info;
domid_t domid;
- cleanup_writable_pagetable(d, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
+ cleanup_writable_pagetable(d);
/*
* If we are resuming after preemption, read how much work we have already
if ( unlikely(page_nr >= (HYPERVISOR_VIRT_START >> PAGE_SHIFT)) )
return -EINVAL;
- cleanup_writable_pagetable(d, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
+ cleanup_writable_pagetable(d);
/*
* XXX When we make this support 4MB superpages we should also deal with
include $(BASEDIR)/Rules.mk
ifeq ($(TARGET_ARCH),ia64)
+OBJS := $(subst dom_mem_ops.o,,$(OBJS))
+OBJS := $(subst grant_table.o,,$(OBJS))
OBJS := $(subst page_alloc.o,,$(OBJS))
OBJS := $(subst slab.o,,$(OBJS))
endif
}
break;
- case DOM0_GETPAGEFRAMEINFO:
- {
- struct pfn_info *page;
- unsigned long pfn = op->u.getpageframeinfo.pfn;
- domid_t dom = op->u.getpageframeinfo.domain;
- struct domain *d;
-
- ret = -EINVAL;
-
- if ( unlikely(pfn >= max_page) ||
- unlikely((d = find_domain_by_id(dom)) == NULL) )
- break;
-
- page = &frame_table[pfn];
-
- if ( likely(get_page(page, d)) )
- {
- ret = 0;
-
- op->u.getpageframeinfo.type = NOTAB;
-
- if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
- {
- switch ( page->u.inuse.type_info & PGT_type_mask )
- {
- case PGT_l1_page_table:
- op->u.getpageframeinfo.type = L1TAB;
- break;
- case PGT_l2_page_table:
- op->u.getpageframeinfo.type = L2TAB;
- break;
- case PGT_l3_page_table:
- op->u.getpageframeinfo.type = L3TAB;
- break;
- case PGT_l4_page_table:
- op->u.getpageframeinfo.type = L4TAB;
- break;
- }
- }
-
- put_page(page);
- }
-
- put_domain(d);
-
- copy_to_user(u_dom0_op, op, sizeof(*op));
- }
- break;
-
- case DOM0_IOPL:
- {
- extern long do_iopl(domid_t, unsigned int);
- ret = do_iopl(op->u.iopl.domain, op->u.iopl.iopl);
- }
- break;
-
#ifdef XEN_DEBUGGER
case DOM0_DEBUG:
{
}
break;
- case DOM0_PHYSINFO:
- {
- dom0_physinfo_t *pi = &op->u.physinfo;
-
- pi->ht_per_core = opt_noht ? 1 : ht_per_core;
- pi->cores = smp_num_cpus / pi->ht_per_core;
- pi->total_pages = max_page;
- pi->free_pages = avail_domheap_pages();
- pi->cpu_khz = cpu_khz;
-
- copy_to_user(u_dom0_op, op, sizeof(*op));
- ret = 0;
- }
- break;
-
case DOM0_PCIDEV_ACCESS:
{
extern int physdev_pci_access_modify(domid_t, int, int, int, int);
}
break;
- case DOM0_GETPAGEFRAMEINFO2:
- {
-#define GPF2_BATCH 128
- int n,j;
- int num = op->u.getpageframeinfo2.num;
- domid_t dom = op->u.getpageframeinfo2.domain;
- unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
- struct domain *d;
- unsigned long l_arr[GPF2_BATCH];
- ret = -ESRCH;
-
- if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
- break;
-
- if ( unlikely(num > 1024) )
- {
- ret = -E2BIG;
- break;
- }
-
- ret = 0;
- for( n = 0; n < num; )
- {
- int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
-
- if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
- {
- ret = -EINVAL;
- break;
- }
-
- for( j = 0; j < k; j++ )
- {
- struct pfn_info *page;
- unsigned long mfn = l_arr[j];
-
- if ( unlikely(mfn >= max_page) )
- goto e2_err;
-
- page = &frame_table[mfn];
-
- if ( likely(get_page(page, d)) )
- {
- unsigned long type = 0;
-
- switch( page->u.inuse.type_info & PGT_type_mask )
- {
- case PGT_l1_page_table:
- type = L1TAB;
- break;
- case PGT_l2_page_table:
- type = L2TAB;
- break;
- case PGT_l3_page_table:
- type = L3TAB;
- break;
- case PGT_l4_page_table:
- type = L4TAB;
- break;
- }
-
- if ( page->u.inuse.type_info & PGT_pinned )
- type |= LPINTAB;
- l_arr[j] |= type;
- put_page(page);
- }
- else
- {
- e2_err:
- l_arr[j] |= XTAB;
- }
-
- }
-
- if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
- {
- ret = -EINVAL;
- break;
- }
-
- n += j;
- }
-
- put_domain(d);
- }
- break;
-
case DOM0_SETDOMAINVMASSIST:
{
struct domain *d;
struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;
-xmem_cache_t *domain_struct_cachep;
struct domain *dom0;
-void __init domain_startofday(void)
-{
- domain_struct_cachep = xmem_cache_create(
- "domain_cache", sizeof(struct domain),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
- if ( domain_struct_cachep == NULL )
- panic("No slab cache for domain structs.");
-}
-
struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
{
struct domain *d, **pd;
return -ENOMEM;
}
- /* initialise to machine_to_phys_mapping table to likely pfn */
- machine_to_phys_mapping[page-frame_table] = alloc_pfns;
+ /* Initialise the machine-to-phys mapping for this page. */
+ set_machinetophys(page_to_pfn(page), alloc_pfns);
}
return 0;
#include <xen/mm.h>
#include <xen/elf.h>
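+/*
+ * x86 kernels are located by ELF virtual address and must be Xen-ELF images;
+ * ia64 kernels are located by physical address and need not be.
+ */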
+#ifdef CONFIG_X86
+#define FORCE_XENELF_IMAGE 1
+#define ELF_ADDR p_vaddr
+#elif defined(__ia64__)
+#define FORCE_XENELF_IMAGE 0
+#define ELF_ADDR p_paddr
+#endif
+
static inline int is_loadable_phdr(Elf_Phdr *phdr)
{
return ((phdr->p_type == PT_LOAD) &&
if ( guestinfo == NULL )
{
printk("Not a Xen-ELF image: '__xen_guest' section not found.\n");
+#if FORCE_XENELF_IMAGE
return -EINVAL;
+#endif
}
for ( h = 0; h < ehdr->e_phnum; h++ )
phdr = (Elf_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
if ( !is_loadable_phdr(phdr) )
continue;
- if ( phdr->p_vaddr < kernstart )
- kernstart = phdr->p_vaddr;
- if ( (phdr->p_vaddr + phdr->p_memsz) > kernend )
- kernend = phdr->p_vaddr + phdr->p_memsz;
+ if ( phdr->ELF_ADDR < kernstart )
+ kernstart = phdr->ELF_ADDR;
+ if ( (phdr->ELF_ADDR + phdr->p_memsz) > kernend )
+ kernend = phdr->ELF_ADDR + phdr->p_memsz;
}
if ( (kernstart > kernend) ||
}
dsi->v_start = kernstart;
- if ( (p = strstr(guestinfo, "VIRT_BASE=")) != NULL )
- dsi->v_start = simple_strtoul(p+10, &p, 0);
- if ( (p = strstr(guestinfo, "PT_MODE_WRITABLE")) != NULL )
- dsi->use_writable_pagetables = 1;
+ if ( guestinfo != NULL )
+ {
+ if ( (p = strstr(guestinfo, "VIRT_BASE=")) != NULL )
+ dsi->v_start = simple_strtoul(p+10, &p, 0);
+
+ if ( (p = strstr(guestinfo, "PT_MODE_WRITABLE")) != NULL )
+ dsi->use_writable_pagetables = 1;
+ }
dsi->v_kernstart = kernstart;
dsi->v_kernend = kernend;
if ( !is_loadable_phdr(phdr) )
continue;
if ( phdr->p_filesz != 0 )
- memcpy((char *)phdr->p_vaddr, elfbase + phdr->p_offset,
+ memcpy((char *)phdr->ELF_ADDR, elfbase + phdr->p_offset,
phdr->p_filesz);
if ( phdr->p_memsz > phdr->p_filesz )
- memset((char *)phdr->p_vaddr + phdr->p_filesz, 0,
+ memset((char *)phdr->ELF_ADDR + phdr->p_filesz, 0,
phdr->p_memsz - phdr->p_filesz);
}
{
struct domain *d;
s_time_t now = NOW();
- struct list_head *ent;
- struct pfn_info *page;
printk("'%c' pressed -> dumping task queues (now=0x%X:%08X)\n", key,
(u32)(now>>32), (u32)now);
test_bit(DF_RUNNING, &d->flags) ? 'T':'F', d->flags,
atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
- if ( d->tot_pages < 10 )
- {
- list_for_each ( ent, &d->page_list )
- {
- page = list_entry(ent, struct pfn_info, list);
- printk("Page %08x: caf=%08x, taf=%08x\n",
- page_to_phys(page), page->count_info,
- page->u.inuse.type_info);
- }
- }
-
- page = virt_to_page(d->shared_info);
- printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
- page_to_phys(page), page->count_info,
- page->u.inuse.type_info);
+ dump_pageframe_info(d);
printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n",
d->shared_info->vcpu_data[0].evtchn_upcall_pending,
/* Per-CPU periodic timer sends an event to the currently-executing domain. */
static struct ac_timer t_timer[NR_CPUS];
-extern xmem_cache_t *domain_struct_cachep;
-
void free_domain_struct(struct domain *d)
{
SCHED_OP(free_task, d);
- xmem_cache_free(domain_struct_cachep, d);
+ arch_free_domain_struct(d);
}
struct domain *alloc_domain_struct(void)
{
struct domain *d;
- if ( (d = xmem_cache_alloc(domain_struct_cachep)) == NULL )
+ if ( (d = arch_alloc_domain_struct()) == NULL )
return NULL;
memset(d, 0, sizeof(*d));
if ( SCHED_OP(alloc_task, d) < 0 )
{
- xmem_cache_free(domain_struct_cachep, d);
+ arch_free_domain_struct(d);
return NULL;
}
task_slice_t next_slice;
s32 r_time; /* time for new dom to run */
- cleanup_writable_pagetable(
- prev, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
+ cleanup_writable_pagetable(prev);
perfc_incrc(sched_run);
#include <xen/sched.h>
#include <xen/softirq.h>
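+/* An architecture that defines __ARCH_IRQ_STAT supplies its own irq_stat. */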
+#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS];
+#endif
static softirq_handler softirq_handlers[NR_SOFTIRQS];
#include <xen/serial.h>
#include <xen/keyhandler.h>
#include <asm/uaccess.h>
+#include <asm/mm.h>
/* opt_console: comma-separated list of console outputs. */
static unsigned char opt_console[30] = "com1,vga";
string_param("conswitch", opt_conswitch);
static int xpos, ypos;
-static unsigned char *video = __va(0xB8000);
+static unsigned char *video;
#define CONSOLE_RING_SIZE 16392
typedef struct console_ring_st
return;
}
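+    /* Locate the legacy VGA text buffer. */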
+ video = __va(0xB8000);
+
tmp = inb(0x3da);
outb(0x00, 0x3c0);
#define UART_ENABLED(_u) ((_u)->baud != 0)
#define DISABLE_UART(_u) ((_u)->baud = 0)
+#ifdef CONFIG_X86
+static inline int arch_serial_putc(uart_t *uart, unsigned char c)
+{
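+    /* Transmit only when the UART transmit-holding register is empty. */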
+ int space;
+ if ( (space = (inb(uart->io_base + LSR) & LSR_THRE)) )
+ outb(c, uart->io_base + THR);
+ return space;
+}
+#endif
+
/***********************
* PRIVATE FUNCTIONS
do {
spin_lock_irqsave(&uart->lock, flags);
- if ( (space = (inb(uart->io_base + LSR) & LSR_THRE)) )
- outb(c, uart->io_base + THR);
+ space = arch_serial_putc(uart, c);
spin_unlock_irqrestore(&uart->lock, flags);
}
while ( !space );
extern unsigned long m2p_start_mfn;
#endif
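+/* Record that machine frame _mfn maps to guest pseudo-physical frame _pfn. */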
+#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
+
#define DEFAULT_GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY+1)
#define DEFAULT_GDT_ADDRESS ((unsigned long)gdt_table)
ptwr_flush(PTWR_PT_INACTIVE); \
} while ( 0 )
-#define cleanup_writable_pagetable(_d, _w) \
+#define cleanup_writable_pagetable(_d) \
do { \
if ( unlikely(VM_ASSIST((_d), VMASST_TYPE_writable_pagetables)) ) \
- __cleanup_writable_pagetable(_w); \
+ __cleanup_writable_pagetable(PTWR_CLEANUP_ACTIVE | \
+ PTWR_CLEANUP_INACTIVE); \
} while ( 0 )
#ifndef NDEBUG
#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
#define __XEN_PUBLIC_ARCH_X86_32_H__
+#ifndef PACKED
+/* GCC-specific way to pack structure definitions (no implicit padding). */
+#define PACKED __attribute__ ((packed))
+#endif
+
/*
* Pointers and other address fields inside interface structures are padded to
* 64 bits. This means that field alignments aren't different between 32- and
#ifndef __XEN_PUBLIC_ARCH_X86_64_H__
#define __XEN_PUBLIC_ARCH_X86_64_H__
+#ifndef PACKED
+/* GCC-specific way to pack structure definitions (no implicit padding). */
+#define PACKED __attribute__ ((packed))
+#endif
+
/* Pointers are naturally 64 bits in this architecture; no padding needed. */
#define _MEMORY_PADDING(_X)
#define MEMORY_PADDING
#ifndef __XEN_PUBLIC_XEN_H__
#define __XEN_PUBLIC_XEN_H__
-#ifndef PACKED
-/* GCC-specific way to pack structure definitions (no implicit padding). */
-#define PACKED __attribute__ ((packed))
-#endif
-
#if defined(__i386__)
#include "arch-x86_32.h"
#elif defined(__x86_64__)
#ifndef __XEN_DOMAIN_H__
#define __XEN_DOMAIN_H__
-extern void domain_startofday(void);
-
/*
* Arch-specifics.
*/
+extern void domain_startofday(void);
+
+extern struct domain *arch_alloc_domain_struct(void);
+
+extern void arch_free_domain_struct(struct domain *d);
+
extern void arch_do_createdomain(struct domain *d);
extern int arch_final_setup_guestos(
extern void domain_relinquish_memory(struct domain *d);
+extern void dump_pageframe_info(struct domain *d);
+
#endif /* __XEN_DOMAIN_H__ */
#define __XEN_GRANT_H__
#include <xen/config.h>
-#include <xen/mm.h>
#include <public/grant_table.h>
/* Active grant entry - used for shadowing GTF_permit_access grants. */
#ifndef __SLAB_H__
#define __SLAB_H__
+#include <xen/config.h>
+
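+/* An architecture may supply its own slab allocator via <asm/slab.h>. */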
+#ifdef __ARCH_HAS_SLAB_ALLOCATOR
+
+#include <asm/slab.h>
+
+#else
+
typedef struct xmem_cache_s xmem_cache_t;
#include <xen/mm.h>
extern void dump_slabinfo();
+#endif /* __ARCH_HAS_SLAB_ALLOCATOR */
+
#endif /* __SLAB_H__ */