ia64/xen-unstable

changeset 24:ab5d1c9a9c89

bitkeeper revision 1.7.3.4 (3df9ce16hEiD6_O61jJn_V8U2QQ8Jw)

user space domain builder code
author lynx@idefix.cl.cam.ac.uk
date Fri Dec 13 12:09:58 2002 +0000 (2002-12-13)
parents c89b11899064
children e092439bac40
files .rootkeys BitKeeper/etc/logging_ok xen-2.4.16/common/dom0_ops.c xen-2.4.16/common/domain.c xen-2.4.16/common/memory.c xen-2.4.16/include/hypervisor-ifs/hypervisor-if.h xen-2.4.16/include/xeno/dom0_ops.h xen-2.4.16/include/xeno/sched.h xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/hypervisor_defs.h xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h xenolinux-2.4.16-sparse/include/asm-xeno/mmu_context.h xenolinux-2.4.16-sparse/include/asm-xeno/pgtable.h
line diff
     1.1 --- a/.rootkeys	Fri Dec 06 23:41:47 2002 +0000
     1.2 +++ b/.rootkeys	Fri Dec 13 12:09:58 2002 +0000
     1.3 @@ -247,7 +247,9 @@ 3ddb79b7LLVJBGynxHSOh9A9l97sug xenolinux
     1.4  3ddb79b7UG2QiRAU-Wvc1Y_BLigu1Q xenolinux-2.4.16-sparse/arch/xeno/drivers/console/console.c
     1.5  3ddb79b75eo4PRXkT6Th9popt_SJhg xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile
     1.6  3ddb79b7Xyaoep6U0kLvx6Kx7OauDw xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c
     1.7 +3df9ce13K7qSLBtHV-01QHPW62649Q xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c
     1.8  3ddb79b7PulSkF9m3c7K5MkxHRf4hA xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h
     1.9 +3df9ce13tITy-OuYx_zQemsvqqLTWA xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/hypervisor_defs.h
    1.10  3ddba759XOjcl_OF-52dOYq7sgMykQ xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/vfr.c
    1.11  3ddb79b7s7yYBioHidSkIoHtQxYmOw xenolinux-2.4.16-sparse/arch/xeno/drivers/network/Makefile
    1.12  3ddb79b7CpLL98ScdpbKkVBktlbCtQ xenolinux-2.4.16-sparse/arch/xeno/drivers/network/network.c
    1.13 @@ -283,8 +285,10 @@ 3ddb79b82kQ5oIXpxq3TUmlgxsLzLg xenolinux
    1.14  3ddb79b8qdD_svLCCAja_oP2w4Tn8Q xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile
    1.15  3ddb79b8ukY8dsPYmR8eNk-aCzFPsQ xenolinux-2.4.16-sparse/arch/xeno/mm/extable.c
    1.16  3ddb79b856Zta9b3s0bgUCGbG1blvQ xenolinux-2.4.16-sparse/arch/xeno/mm/fault.c
    1.17 +3df9ce13dZ6UGDjZbUeZfyH4Hy6aCA xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c
    1.18  3ddb79b85fpsKT8A9WYnuJg03b715g xenolinux-2.4.16-sparse/arch/xeno/mm/hypervisor.c
    1.19  3ddb79b83Zj7Xn2QVhU4HeMuAC9FjA xenolinux-2.4.16-sparse/arch/xeno/mm/init.c
    1.20 +3df9ce13TRWIv0Mawm15zESP7jcT7A xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c
    1.21  3ddb79b7aKdTkbr3u6aze8tVwGh_TQ xenolinux-2.4.16-sparse/arch/xeno/vmlinux.lds
    1.22  3ddb79bbx682YH6vR2zbVOXwg73ULg xenolinux-2.4.16-sparse/drivers/block/ll_rw_blk.c
    1.23  3ddb79bcJfHdwrPsjqgI33_OsGdVCg xenolinux-2.4.16-sparse/drivers/block/rd.c
     2.1 --- a/BitKeeper/etc/logging_ok	Fri Dec 06 23:41:47 2002 +0000
     2.2 +++ b/BitKeeper/etc/logging_ok	Fri Dec 13 12:09:58 2002 +0000
     2.3 @@ -2,4 +2,5 @@ akw27@boulderdash.cl.cam.ac.uk
     2.4  kaf24@labyrinth.cl.cam.ac.uk
     2.5  kaf24@plym.cl.cam.ac.uk
     2.6  kaf24@striker.cl.cam.ac.uk
     2.7 +lynx@idefix.cl.cam.ac.uk
     2.8  smh22@boulderdash.cl.cam.ac.uk
     3.1 --- a/xen-2.4.16/common/dom0_ops.c	Fri Dec 06 23:41:47 2002 +0000
     3.2 +++ b/xen-2.4.16/common/dom0_ops.c	Fri Dec 13 12:09:58 2002 +0000
     3.3 @@ -1,3 +1,4 @@
     3.4 +
     3.5  /******************************************************************************
     3.6   * dom0_ops.c
     3.7   * 
     3.8 @@ -13,6 +14,7 @@
     3.9  #include <xeno/sched.h>
    3.10  #include <xeno/event.h>
    3.11  
    3.12 +extern unsigned int alloc_new_dom_mem(struct task_struct *, unsigned int);
    3.13  
    3.14  static unsigned int get_domnr(void)
    3.15  {
    3.16 @@ -42,6 +44,21 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    3.17      switch ( op.cmd )
    3.18      {
    3.19  
    3.20 +    case DOM0_STARTDOM:
    3.21 +    {
    3.22 +        struct task_struct * p = find_domain_by_id(op.u.meminfo.domain);
    3.23 +        ret = final_setup_guestos(p, &op.u.meminfo);
    3.24 +        if( ret != 0 ){
    3.25 +            p->state = TASK_DYING;
    3.26 +            release_task(p);
    3.27 +            break;
    3.28 +        }
    3.29 +        wake_up(p);
    3.30 +        reschedule(p);
    3.31 +        ret = p->domain;
    3.32 +    }
    3.33 +    break;
    3.34 +
    3.35      case DOM0_NEWDOMAIN:
    3.36      {
    3.37          struct task_struct *p;
    3.38 @@ -54,6 +71,20 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    3.39          p->domain = dom;
    3.40          pro = (pro+1) % smp_num_cpus;
    3.41          p->processor = pro;
    3.42 +
     3.43 +        /* if we are not booting dom 0 then only mem 
    3.44 +         * needs to be allocated
    3.45 +         */
    3.46 +        if(dom != 0){
    3.47 +            if(alloc_new_dom_mem(p, op.u.newdomain.memory_kb) != 0){
    3.48 +                ret = -1;
    3.49 +                break;
    3.50 +            }
    3.51 +            ret = p->domain;
    3.52 +            break;
    3.53 +        }
    3.54 +
    3.55 +        /* executed only in case of domain 0 */
    3.56          ret = setup_guestos(p, &op.u.newdomain);    /* Load guest OS into @p */
    3.57          if ( ret != 0 ) 
    3.58          {
    3.59 @@ -81,6 +112,16 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    3.60      }
    3.61      break;
    3.62  
    3.63 +    case DOM0_MAPTASK:
    3.64 +    {
    3.65 +        unsigned int dom = op.u.mapdomts.domain;
    3.66 +        
    3.67 +        op.u.mapdomts.ts_phy_addr = __pa(find_domain_by_id(dom));
    3.68 +        copy_to_user(u_dom0_op, &op, sizeof(op));
    3.69 +
    3.70 +    }
    3.71 +    break;
    3.72 +
    3.73      default:
    3.74          ret = -ENOSYS;
    3.75  
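These dom0_ops.c hunks split domain construction into two steps: DOM0_NEWDOMAIN now only reserves memory (and a domain id) for any domain other than 0, the new DOM0_STARTDOM finalises a domain once the user-space builder has loaded its image, and DOM0_MAPTASK exposes the physical address of a domain's task_struct. A minimal sketch of the two requests follows, using the dom0_op_t fields added to xen/include/xeno/dom0_ops.h below; the lower-case variables are placeholders for values the builder computes, not names from this changeset:

    /* Illustration only: the shape of the two-phase build request. */
    dom0_op_t op;
    unsigned int  new_dom_id;                 /* returned by phase 1 */
    unsigned long l2_pgt_phys, image_virt_base, shared_info_virt, start_info_virt;

    /* Phase 1: allocate memory and pick a domain id (dom != 0 stops here). */
    op.cmd = DOM0_NEWDOMAIN;
    op.u.newdomain.memory_kb = 16 * 1024;     /* example: a 16MB guest */
    op.u.newdomain.num_vifs  = 1;
    /* ... do_dom0_op() returns the new domain id ... */

    /* Phase 2: after the builder has written the kernel image and its page
     * tables into that memory, describe the layout and start the domain. */
    op.cmd = DOM0_STARTDOM;
    op.u.meminfo.domain              = new_dom_id;
    op.u.meminfo.l2_pgt_addr         = l2_pgt_phys;      /* top-level page table */
    op.u.meminfo.virt_load_addr      = image_virt_base;
    op.u.meminfo.virt_shinfo_addr    = shared_info_virt;
    op.u.meminfo.virt_startinfo_addr = start_info_virt;
    op.u.meminfo.num_vifs            = 1;
    /* ... do_dom0_op() calls final_setup_guestos() and wakes the domain ... */
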
     4.1 --- a/xen-2.4.16/common/domain.c	Fri Dec 06 23:41:47 2002 +0000
     4.2 +++ b/xen-2.4.16/common/domain.c	Fri Dec 13 12:09:58 2002 +0000
     4.3 @@ -1,3 +1,4 @@
     4.4 +
     4.5  #include <xeno/config.h>
     4.6  #include <xeno/init.h>
     4.7  #include <xeno/lib.h>
     4.8 @@ -11,6 +12,16 @@
     4.9  #include <xeno/dom0_ops.h>
    4.10  #include <asm/io.h>
    4.11  
    4.12 +#include <asm/msr.h>
    4.13 +#include <xeno/multiboot.h>
    4.14 +
    4.15 +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
    4.16 +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
    4.17 +
    4.18 +extern int nr_mods;
    4.19 +extern module_t *mod;
    4.20 +extern unsigned char *cmdline;
    4.21 +
    4.22  rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
    4.23  
    4.24  schedule_data_t schedule_data[NR_CPUS];
    4.25 @@ -322,7 +333,7 @@ asmlinkage void schedule(void)
    4.26  }
    4.27  
    4.28  
    4.29 -static unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
    4.30 +unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
    4.31  {
    4.32  
    4.33      struct list_head *temp;
    4.34 @@ -370,6 +381,124 @@ static unsigned int alloc_new_dom_mem(st
    4.35      return 0;
    4.36  }
    4.37  
    4.38 +int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
    4.39 +{
    4.40 +    l2_pgentry_t * l2tab;
    4.41 +    l1_pgentry_t * l1tab;
    4.42 +    start_info_t * virt_startinfo_addr;
    4.43 +    unsigned long virt_stack_addr;
    4.44 +    unsigned long long time;
    4.45 +    net_ring_t *net_ring;
    4.46 +    char *dst;    // temporary
    4.47 +    int i;        // temporary
    4.48 +
    4.49 +    /* entries 0xe0000000 onwards in page table must contain hypervisor
    4.50 +     * mem mappings - set them up.
    4.51 +     */
    4.52 +    l2tab = (l2_pgentry_t *)__va(meminfo->l2_pgt_addr);
    4.53 +    memcpy(l2tab + DOMAIN_ENTRIES_PER_L2_PAGETABLE, 
    4.54 +        ((l2_pgentry_t *)idle0_pg_table) + DOMAIN_ENTRIES_PER_L2_PAGETABLE, 
    4.55 +        (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t));
    4.56 +    p->mm.pagetable = mk_pagetable((unsigned long)l2tab);
    4.57 +
    4.58 +    /* map in the shared info structure */
    4.59 +    l2tab = pagetable_ptr(p->mm.pagetable) + l2_table_offset(meminfo->virt_shinfo_addr);
    4.60 +    l1tab = l2_pgentry_to_l1(*l2tab) + l1_table_offset(meminfo->virt_shinfo_addr);
    4.61 +    *l1tab = mk_l1_pgentry(__pa(p->shared_info) | L1_PROT);
    4.62 +
    4.63 +    /* set up the shared info structure */
    4.64 +    rdtscll(time);
    4.65 +    p->shared_info->wall_time    = time;
    4.66 +    p->shared_info->domain_time  = time;
    4.67 +    p->shared_info->ticks_per_ms = ticks_per_usec * 1000;
    4.68 +
    4.69 +    /* we pass start info struct to guest os as function parameter on stack */
    4.70 +    virt_startinfo_addr = (start_info_t *)meminfo->virt_startinfo_addr;
    4.71 +    virt_stack_addr = (unsigned long)virt_startinfo_addr;       
    4.72 +
    4.73 +    /* we need to populate start_info struct within the context of the
     4.74 +     * new domain. thus, temporarily install its pagetables.
    4.75 +     */
    4.76 +    __cli();
    4.77 +    __asm__ __volatile__ (
    4.78 +        "mov %%eax, %%cr3"
    4.79 +        : : "a" (__pa(pagetable_ptr(p->mm.pagetable))));
    4.80 +
    4.81 +    memset(virt_startinfo_addr, 0, sizeof(virt_startinfo_addr));
    4.82 +    virt_startinfo_addr->nr_pages = p->tot_pages;
    4.83 +    virt_startinfo_addr->shared_info = (shared_info_t *)meminfo->virt_shinfo_addr;
    4.84 +    virt_startinfo_addr->pt_base = meminfo->virt_load_addr + 
    4.85 +                    ((p->tot_pages - 1) << PAGE_SHIFT);
    4.86 +
     4.87 +    /* now, this is just temporary before we switch to pseudo phys
    4.88 +     * addressing. this works only for contiguous chunks of memory!!!
    4.89 +     */
    4.90 +    virt_startinfo_addr->phys_base = p->pg_head << PAGE_SHIFT;
    4.91 +    
    4.92 +    /* Add virtual network interfaces and point to them in startinfo. */
    4.93 +    while (meminfo->num_vifs-- > 0) {
    4.94 +        net_ring = create_net_vif(p->domain);
    4.95 +        if (!net_ring) panic("no network ring!\n");
    4.96 +    }
    4.97 +    virt_startinfo_addr->net_rings = p->net_ring_base;
    4.98 +    virt_startinfo_addr->num_net_rings = p->num_net_vifs;
    4.99 +
   4.100 +    /* Add block io interface */
   4.101 +    virt_startinfo_addr->blk_ring = p->blk_ring_base;
   4.102 +
   4.103 +    /* i do not think this has to be done any more, temporary */
   4.104 +    /* We tell OS about any modules we were given. */
   4.105 +    if ( nr_mods > 1 )
   4.106 +    {
   4.107 +        virt_startinfo_addr->mod_start = 
   4.108 +            (mod[1].mod_start-mod[0].mod_start-12) + meminfo->virt_load_addr;
   4.109 +        virt_startinfo_addr->mod_len = 
   4.110 +            mod[nr_mods-1].mod_end - mod[1].mod_start;
   4.111 +    }
   4.112 +
    4.113 +    /* temporary, meminfo->cmd_line just needs to be copied into start_info */
   4.114 +    dst = virt_startinfo_addr->cmd_line;
   4.115 +    if ( mod[0].string )
   4.116 +    {
   4.117 +        char *modline = (char *)__va(mod[0].string);
   4.118 +        for ( i = 0; i < 255; i++ )
   4.119 +        {
   4.120 +            if ( modline[i] == '\0' ) break;
   4.121 +            *dst++ = modline[i];
   4.122 +        }
   4.123 +    }
   4.124 +    *dst = '\0';
   4.125 +
   4.126 +    if ( opt_nfsroot )
   4.127 +    {
   4.128 +        unsigned char boot[150];
   4.129 +        unsigned char ipbase[20], nfsserv[20], gateway[20], netmask[20];
   4.130 +        unsigned char nfsroot[70];
   4.131 +        snprintf(nfsroot, 70, opt_nfsroot, p->domain); 
   4.132 +        snprintf(boot, 200,
   4.133 +                " root=/dev/nfs ip=%s:%s:%s:%s::eth0:off nfsroot=%s",
   4.134 +                 quad_to_str(opt_ipbase + p->domain, ipbase),
   4.135 +                 quad_to_str(opt_nfsserv, nfsserv),
   4.136 +                 quad_to_str(opt_gateway, gateway),
   4.137 +                 quad_to_str(opt_netmask, netmask),
   4.138 +                 nfsroot);
   4.139 +        strcpy(dst, boot);
   4.140 +    }
   4.141 +
   4.142 +    /* Reinstate the caller's page tables. */
   4.143 +    __asm__ __volatile__ (
   4.144 +        "mov %%eax,%%cr3"
   4.145 +        : : "a" (__pa(pagetable_ptr(current->mm.pagetable))));    
   4.146 +    __sti();
   4.147 +
   4.148 +    new_thread(p, 
   4.149 +               (unsigned long)meminfo->virt_load_addr, 
   4.150 +               (unsigned long)virt_stack_addr, 
   4.151 +               (unsigned long)virt_startinfo_addr);
   4.152 +
   4.153 +    return 0;
   4.154 +}
   4.155 +     
   4.156  /*
   4.157   * Initial load map:
   4.158   *  start_address:
   4.159 @@ -385,15 +514,8 @@ static unsigned int alloc_new_dom_mem(st
   4.160   *      <one page>
   4.161   */
   4.162  #define MB_PER_DOMAIN 16
   4.163 -#include <asm/msr.h>
   4.164 -#include <xeno/multiboot.h>
   4.165 -extern int nr_mods;
   4.166 -extern module_t *mod;
   4.167 -extern unsigned char *cmdline;
   4.168  int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
   4.169  {
   4.170 -#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
   4.171 -#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
   4.172  #define ALLOC_PAGE_FROM_DOMAIN() \
   4.173    ({ alloc_address -= PAGE_SIZE; __va(alloc_address); })
   4.174      char *src, *dst;
   4.175 @@ -410,6 +532,7 @@ int setup_guestos(struct task_struct *p,
   4.176      l1_pgentry_t *l1tab = NULL;
   4.177      struct pfn_info *page = NULL;
   4.178      net_ring_t *net_ring;
   4.179 +    blk_ring_t *blk_ring;
   4.180  
   4.181      if ( strncmp(__va(mod[0].mod_start), "XenoGues", 8) )
   4.182      {
   4.183 @@ -571,6 +694,8 @@ int setup_guestos(struct task_struct *p,
   4.184      virt_startinfo_address->phys_base = start_address;
   4.185      /* NB. Next field will be NULL if dom != 0. */
   4.186      virt_startinfo_address->frame_table = virt_ftable_start_addr;
   4.187 +    virt_startinfo_address->frame_table_len = ft_size;
   4.188 +    virt_startinfo_address->frame_table_pa = __pa(frame_table);
   4.189  
   4.190      /* Add virtual network interfaces and point to them in startinfo. */
   4.191      while (params->num_vifs-- > 0) {
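final_setup_guestos() depends on the split described in its opening comment: every guest L2 page table must carry the hypervisor's own mappings from 0xe0000000 upwards, so those entries are copied verbatim from idle0_pg_table before %cr3 is pointed at the guest's tables. With 4MB per L2 entry the arithmetic works out as below (macro names here are illustrative; the real constants live in the hypervisor headers):

    /* Back-of-envelope view of the memcpy at the top of final_setup_guestos(),
     * assuming 1024 L2 entries of 4MB each and the 0xe0000000 boundary quoted
     * in the comment above. */
    #define HYPERVISOR_BOUNDARY   0xe0000000UL
    #define DOMAIN_L2_ENTRIES     (HYPERVISOR_BOUNDARY >> 22)   /*  896 guest entries   */
    #define HYPERVISOR_L2_ENTRIES (1024 - DOMAIN_L2_ENTRIES)    /*  128 entries = 512MB */

    /* i.e. memcpy(l2tab + 896, idle0_pg_table + 896, 128 * sizeof(l2_pgentry_t))
     * keeps Xen mapped while the guest's page tables are live in %cr3. */
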
     5.1 --- a/xen-2.4.16/common/memory.c	Fri Dec 06 23:41:47 2002 +0000
     5.2 +++ b/xen-2.4.16/common/memory.c	Fri Dec 13 12:09:58 2002 +0000
     5.3 @@ -1,3 +1,5 @@
     5.4 +
     5.5 +
     5.6  /******************************************************************************
     5.7   * memory.c
     5.8   * 
     5.9 @@ -439,11 +441,15 @@ static void put_page(unsigned long page_
    5.10      struct pfn_info *page;
    5.11      ASSERT(page_nr < max_page);
    5.12      page = frame_table + page_nr;
    5.13 -    ASSERT((page->flags & PG_domain_mask) == current->domain);
    5.14 -    ASSERT((((page->flags & PG_type_mask) == PGT_writeable_page) &&
    5.15 -            (page_type_count(page) != 0)) ||
    5.16 -           (((page->flags & PG_type_mask) == PGT_none) &&
    5.17 -            (page_type_count(page) == 0)));
    5.18 +
    5.19 +    if(current->domain != 0){
    5.20 +        ASSERT((page->flags & PG_domain_mask) == current->domain);
    5.21 +        ASSERT((((page->flags & PG_type_mask) == PGT_writeable_page) &&
    5.22 +             (page_type_count(page) != 0)) ||
    5.23 +            (((page->flags & PG_type_mask) == PGT_none) &&
    5.24 +             (page_type_count(page) == 0)));
    5.25 +    }
    5.26 +
    5.27      ASSERT((!writeable) || (page_type_count(page) != 0));
    5.28      if ( writeable && (put_page_type(page) == 0) )
    5.29          page->flags &= ~PG_type_mask;
    5.30 @@ -559,39 +565,23 @@ int do_process_page_updates(page_update_
    5.31              kill_domain_with_errmsg("Cannot read page update request");
    5.32          }
    5.33  
    5.34 +        pfn = cur.ptr >> PAGE_SHIFT;
    5.35 +        if ( pfn >= max_page )
    5.36 +        {
    5.37 +            MEM_LOG("Page out of range (%08lx > %08lx)", pfn, max_page);
    5.38 +            kill_domain_with_errmsg("Page update request out of range");
    5.39 +        }
    5.40 +
    5.41          err = 1;
    5.42  
    5.43 -        pfn = cur.ptr >> PAGE_SHIFT;
    5.44 -        if ( !pfn )
    5.45 +        /* Least significant bits of 'ptr' demux the operation type. */
    5.46 +        switch ( cur.ptr & (sizeof(l1_pgentry_t)-1) )
    5.47          {
    5.48 -            switch ( cur.ptr )
    5.49 -            {
    5.50 -            case PGREQ_ADD_BASEPTR:
    5.51 -                err = get_l2_table(cur.val >> PAGE_SHIFT);
    5.52 -                break;
    5.53 -            case PGREQ_REMOVE_BASEPTR:
    5.54 -                if ( cur.val == __pa(pagetable_ptr(current->mm.pagetable)) )
    5.55 -                {
    5.56 -                    MEM_LOG("Attempt to remove current baseptr! %08lx",
    5.57 -                            cur.val);
    5.58 -                }
    5.59 -                else
    5.60 -                {
    5.61 -                    err = put_l2_table(cur.val >> PAGE_SHIFT);
    5.62 -                }
    5.63 -                break;
    5.64 -            default:
    5.65 -                MEM_LOG("Invalid page update command %08lx", cur.ptr);
    5.66 -                break;
    5.67 -            }
    5.68 -        }
    5.69 -        else if ( (cur.ptr & (sizeof(l1_pgentry_t)-1)) || (pfn >= max_page) )
    5.70 -        {
    5.71 -            MEM_LOG("Page out of range (%08lx>%08lx) or misalign %08lx",
    5.72 -                    pfn, max_page, cur.ptr);
    5.73 -        }
    5.74 -        else
    5.75 -        {
    5.76 +
    5.77 +            /*
    5.78 +             * PGREQ_NORMAL: Normal update to any level of page table.
    5.79 +             */
    5.80 +        case PGREQ_NORMAL:
    5.81              page = frame_table + pfn;
    5.82              flags = page->flags;
    5.83              if ( (flags & PG_domain_mask) == current->domain )
    5.84 @@ -607,20 +597,63 @@ int do_process_page_updates(page_update_
    5.85                                         mk_l2_pgentry(cur.val)); 
    5.86                      break;
    5.87                  default:
    5.88 -                    /*
    5.89 -                     * This might occur if a page-table update is
    5.90 -                     * requested before we've inferred the type
    5.91 -                     * of the containing page. It shouldn't happen
    5.92 -                     * if page tables are built strictly top-down, so
    5.93 -                     * we have a MEM_LOG warning message.
    5.94 -                     */
    5.95 -                    MEM_LOG("Unnecessary update to non-pt page %08lx",
    5.96 -                            cur.ptr);
    5.97 -                    *(unsigned long *)__va(cur.ptr) = cur.val;
    5.98 -                    err = 0;
    5.99 +                    MEM_LOG("Update to non-pt page %08lx", cur.ptr);
   5.100                      break;
   5.101                  }
   5.102              }
   5.103 +            break;
   5.104 +
   5.105 +            /*
   5.106 +             * PGREQ_ADD_BASEPTR: Announce a new top-level page table.
   5.107 +             */
   5.108 +        case PGREQ_ADD_BASEPTR:
   5.109 +            err = get_l2_table(cur.val >> PAGE_SHIFT);
   5.110 +            break;
   5.111 +
   5.112 +            /*
   5.113 +             * PGREQ_REMOVE_BASEPTR: Destroy reference to a top-level page
   5.114 +             * table.
   5.115 +             */
   5.116 +        case PGREQ_REMOVE_BASEPTR:
   5.117 +            pfn = cur.val >> PAGE_SHIFT;
   5.118 +            if ( pfn != (__pa(pagetable_ptr(current->mm.pagetable))
   5.119 +                         >> PAGE_SHIFT) )
   5.120 +            {
   5.121 +                err = put_l2_table(pfn);
   5.122 +            }
   5.123 +            else
   5.124 +            {
   5.125 +                MEM_LOG("Attempt to remove current baseptr! %08lx",
   5.126 +                        cur.val);
   5.127 +            }
   5.128 +            break;
   5.129 +
   5.130 +            /*
   5.131 +             * PGREQ_UNCHECKED_UPDATE: Make an unchecked update to a
   5.132 +             * bottom-level page-table entry.
   5.133 +             * Restrictions apply:
   5.134 +             *  1. Update only allowed by domain 0.
   5.135 +             *  2. Update must be to a level-1 pte belonging to dom0.
   5.136 +             */
   5.137 +        case PGREQ_UNCHECKED_UPDATE:
   5.138 +            cur.ptr &= ~(sizeof(l1_pgentry_t) - 1);
   5.139 +            page = frame_table + pfn;
   5.140 +            flags = page->flags;
   5.141 +            if ( (flags | current->domain) == PGT_l1_page_table )
   5.142 +            {
   5.143 +                *(unsigned long *)__va(cur.ptr) = cur.val;
   5.144 +                err = 0;
   5.145 +            }
   5.146 +            else
   5.147 +            {
   5.148 +                MEM_LOG("UNCHECKED_UPDATE: Bad domain %d, or"
   5.149 +                        " bad pte type %08lx", current->domain, flags);
   5.150 +            }
   5.151 +            break;
   5.152 +
   5.153 +        default:
   5.154 +            MEM_LOG("Invalid page update command %08lx", cur.ptr);
   5.155 +            break;
   5.156          }
   5.157  
   5.158          if ( err )
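The rewritten do_process_page_updates() loop demuxes the request type from the two least-significant bits of 'ptr' instead of special-casing pfn 0; this works because a page-table entry is always at least 4-byte aligned, so those bits are free. From the guest's side a request is encoded roughly as follows (a sketch; the lower-case variables are placeholders, and how a guest queues and flushes its requests is up to its own hypervisor interface code):

    /* Sketch: encoding requests under the new low-bit demux.
     * page_update_request_t and PGREQ_* come from hypervisor-if.h below. */
    page_update_request_t req;
    unsigned long pte_phys_addr, new_pte_value, l2_table_phys_addr;  /* placeholders */

    /* Ordinary update to a page-table entry: entry address, type in bits 0-1. */
    req.ptr = pte_phys_addr | PGREQ_NORMAL;          /* PGREQ_NORMAL == 0 */
    req.val = new_pte_value;

    /* Announce (or, with REMOVE, retire) a top-level page table: the frame
     * travels in 'val', so no entry address is needed in 'ptr'. */
    req.ptr = PGREQ_ADD_BASEPTR;
    req.val = l2_table_phys_addr;

    /* Dom0-only escape hatch used by the new direct-mapping code further down. */
    req.ptr = pte_phys_addr | PGREQ_UNCHECKED_UPDATE;
    req.val = new_pte_value;
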
     6.1 --- a/xen-2.4.16/include/hypervisor-ifs/hypervisor-if.h	Fri Dec 06 23:41:47 2002 +0000
     6.2 +++ b/xen-2.4.16/include/hypervisor-ifs/hypervisor-if.h	Fri Dec 13 12:09:58 2002 +0000
     6.3 @@ -1,3 +1,4 @@
     6.4 +
     6.5  /******************************************************************************
     6.6   * hypervisor-if.h
     6.7   * 
     6.8 @@ -21,8 +22,15 @@ typedef struct trap_info_st
     6.9  
    6.10  typedef struct
    6.11  {
    6.12 -#define PGREQ_ADD_BASEPTR    0
    6.13 -#define PGREQ_REMOVE_BASEPTR 1
     6.14 +/* PGREQ_XXX: specified in least-significant bits of 'ptr' field. */
    6.15 +/* A normal page-table update request. */
    6.16 +#define PGREQ_NORMAL           0
    6.17 +/* Announce a new top-level page table. */
    6.18 +#define PGREQ_ADD_BASEPTR      1
    6.19 +/* Destroy an existing top-level page table. */
    6.20 +#define PGREQ_REMOVE_BASEPTR   2
    6.21 +/* Make an unchecked update to a base-level pte. */
    6.22 +#define PGREQ_UNCHECKED_UPDATE 3
    6.23      unsigned long ptr, val; /* *ptr = val */
    6.24  } page_update_request_t;
    6.25  
    6.26 @@ -144,6 +152,8 @@ typedef struct start_info_st {
    6.27      int num_net_rings;
    6.28      blk_ring_t *blk_ring;         /* block io communication rings */
    6.29      unsigned long frame_table;    /* mapping of the frame_table for dom0 */
    6.30 +    unsigned long frame_table_len;
    6.31 +    unsigned long frame_table_pa; /* frame_table physical address */
    6.32      unsigned char cmd_line[1];    /* variable-length */
    6.33  } start_info_t;
    6.34  
     7.1 --- a/xen-2.4.16/include/xeno/dom0_ops.h	Fri Dec 06 23:41:47 2002 +0000
     7.2 +++ b/xen-2.4.16/include/xeno/dom0_ops.h	Fri Dec 13 12:09:58 2002 +0000
     7.3 @@ -1,9 +1,10 @@
     7.4 +
     7.5  /******************************************************************************
     7.6   * dom0_ops.h
     7.7   * 
     7.8   * Process command requests from domain-0 guest OS.
     7.9   * 
    7.10 - * Copyright (c) 2002, K A Fraser
    7.11 + * Copyright (c) 2002, K A Fraser, B Dragovic
    7.12   */
    7.13  
    7.14  #ifndef __DOM0_OPS_H__
    7.15 @@ -11,11 +12,16 @@
    7.16  
    7.17  #define DOM0_NEWDOMAIN   0
    7.18  #define DOM0_KILLDOMAIN  1
    7.19 +#define DOM0_MAPTASK     2
    7.20 +#define DOM0_STARTDOM    4
    7.21  
    7.22 -typedef struct dom0_newdomain_st
    7.23 +#define MAX_CMD_LEN    256
    7.24 +
    7.25 +typedef struct dom0_newdomain_st 
    7.26  {
    7.27      unsigned int memory_kb;
    7.28 -    unsigned int num_vifs;
    7.29 +    unsigned int num_vifs;  // temporary
    7.30 +    unsigned int domain; 
    7.31  } dom0_newdomain_t;
    7.32  
    7.33  typedef struct dom0_killdomain_st
    7.34 @@ -23,6 +29,23 @@ typedef struct dom0_killdomain_st
    7.35      unsigned int domain;
    7.36  } dom0_killdomain_t;
    7.37  
    7.38 +typedef struct dom0_map_ts
    7.39 +{
    7.40 +    unsigned int domain;
    7.41 +    unsigned long ts_phy_addr;
    7.42 +} dom0_tsmap_t;
    7.43 +
    7.44 +typedef struct domain_launch
    7.45 +{
    7.46 +    unsigned long domain;
    7.47 +    unsigned long l2_pgt_addr;
    7.48 +    unsigned long virt_load_addr;
    7.49 +    unsigned long virt_shinfo_addr;
    7.50 +    unsigned long virt_startinfo_addr;
    7.51 +    unsigned int num_vifs;
    7.52 +    char cmd_line[MAX_CMD_LEN];
    7.53 +} dom_meminfo_t;
    7.54 +
    7.55  typedef struct dom0_op_st
    7.56  {
    7.57      unsigned long cmd;
    7.58 @@ -30,6 +53,8 @@ typedef struct dom0_op_st
    7.59      {
    7.60          dom0_newdomain_t newdomain;
    7.61          dom0_killdomain_t killdomain;
    7.62 +        dom0_tsmap_t mapdomts;
    7.63 +        dom_meminfo_t meminfo;
    7.64      }
    7.65      u;
    7.66  } dom0_op_t;
     8.1 --- a/xen-2.4.16/include/xeno/sched.h	Fri Dec 06 23:41:47 2002 +0000
     8.2 +++ b/xen-2.4.16/include/xeno/sched.h	Fri Dec 13 12:09:58 2002 +0000
     8.3 @@ -1,3 +1,4 @@
     8.4 +
     8.5  #ifndef _LINUX_SCHED_H
     8.6  #define _LINUX_SCHED_H
     8.7  
     8.8 @@ -41,10 +42,19 @@ extern struct mm_struct init_mm;
     8.9  #include <xeno/block.h>
    8.10  
    8.11  struct task_struct {
    8.12 +
    8.13      int processor;
    8.14      int state, hyp_events;
    8.15      unsigned int domain;
    8.16  
    8.17 +    /* index into frame_table threading pages belonging to this
    8.18 +     * domain together. these are placed at the top of the structure
    8.19 +     * to avoid nasty padding for various kernel structs when using
    8.20 +     * task_struct in user space
    8.21 +     */
    8.22 +    unsigned long pg_head;
    8.23 +    unsigned int tot_pages;
    8.24 +
    8.25      /* An unsafe pointer into a shared data area. */
    8.26      shared_info_t *shared_info;
    8.27      
    8.28 @@ -75,13 +85,7 @@ struct task_struct {
    8.29      struct mm_struct *active_mm;
    8.30      struct thread_struct thread;
    8.31      struct task_struct *prev_task, *next_task;
    8.32 -	
    8.33 -    /* index into frame_table threading pages belonging to this
    8.34 -     * domain together
    8.35 -     */
    8.36 -    unsigned long pg_head;
    8.37 -    unsigned int tot_pages;
    8.38 -
    8.39 +    
    8.40      unsigned long flags;
    8.41  };
    8.42  
    8.43 @@ -126,6 +130,7 @@ extern struct task_struct first_task_str
    8.44  
    8.45  extern struct task_struct *do_newdomain(void);
    8.46  extern int setup_guestos(struct task_struct *p, dom0_newdomain_t *params);
    8.47 +extern int final_setup_guestos(struct task_struct *p, dom_meminfo_t *);
    8.48  
    8.49  struct task_struct *find_domain_by_id(unsigned int dom);
    8.50  extern void release_task(struct task_struct *);
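Moving pg_head and tot_pages to the head of task_struct, directly after the four ints, lets a user-space tool that maps a domain's task_struct page (via DOM0_MAPTASK and the /proc interface below) read them without reproducing the whole kernel structure and its padding. A hypothetical user-space mirror of just that prefix could look like this (the prefix struct is an illustration, not something this changeset adds):

    /* Hypothetical user-space view of the start of task_struct.  Only the
     * leading fields matter to the domain builder; everything after
     * tot_pages is deliberately ignored. */
    typedef struct task_struct_prefix {
        int processor;
        int state, hyp_events;
        unsigned int domain;
        unsigned long pg_head;     /* first frame of this domain's page chain */
        unsigned int tot_pages;    /* number of pages the domain owns */
    } task_struct_prefix_t;
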
     9.1 --- a/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile	Fri Dec 06 23:41:47 2002 +0000
     9.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile	Fri Dec 13 12:09:58 2002 +0000
     9.3 @@ -1,3 +1,3 @@
     9.4  O_TARGET := dom0.o
     9.5 -obj-y := dom0_core.o vfr.o
     9.6 +obj-y := dom0_memory.o dom0_core.o vfr.o
     9.7  include $(TOPDIR)/Rules.make
    10.1 --- a/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c	Fri Dec 06 23:41:47 2002 +0000
    10.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c	Fri Dec 13 12:09:58 2002 +0000
    10.3 @@ -1,9 +1,10 @@
    10.4 +
    10.5  /******************************************************************************
    10.6   * dom0_core.c
    10.7   * 
    10.8   * Interface to privileged domain-0 commands.
    10.9   * 
   10.10 - * Copyright (c) 2002, K A Fraser
   10.11 + * Copyright (c) 2002, K A Fraser, B Dragovic
   10.12   */
   10.13  
   10.14  #include <linux/config.h>
   10.15 @@ -15,13 +16,49 @@
   10.16  #include <linux/errno.h>
   10.17  #include <linux/proc_fs.h>
   10.18  
   10.19 +#include <linux/mm.h>
   10.20 +#include <linux/mman.h>
   10.21 +#include <linux/swap.h>
   10.22 +#include <linux/smp_lock.h>
   10.23 +#include <linux/swapctl.h>
   10.24 +#include <linux/iobuf.h>
   10.25 +#include <linux/highmem.h>
   10.26 +#include <linux/pagemap.h>
   10.27 +
   10.28 +#include <asm/pgalloc.h>
   10.29 +#include <asm/pgtable.h>
   10.30 +#include <asm/uaccess.h>
   10.31 +#include <asm/tlb.h>
   10.32 +
   10.33  #include "dom0_ops.h"
   10.34 +#include "hypervisor_defs.h"
   10.35  
   10.36 -static struct proc_dir_entry *proc_dom0;
   10.37 +#define XENO_BASE       "xeno"          // proc file name defs should be in separate .h
   10.38 +#define DOM0_CMD_INTF   "dom0_cmd"
   10.39 +#define DOM0_FT         "frame_table"
   10.40 +#define DOM0_NEWDOM     "new_dom_id"
   10.41 +
   10.42 +#define MAX_LEN         16
   10.43 +#define DOM_DIR         "dom"
   10.44 +#define DOM_TS          "task_data"
   10.45 +#define DOM_MEM         "mem"
   10.46 +
   10.47 +static struct proc_dir_entry *xeno_base;
   10.48 +static struct proc_dir_entry *dom0_cmd_intf;
   10.49 +static struct proc_dir_entry *proc_ft;
   10.50 +
   10.51 +unsigned long direct_mmap(unsigned long, unsigned long, pgprot_t, int, int);
   10.52 +int direct_unmap(unsigned long, unsigned long);
   10.53 +int direct_disc_unmap(unsigned long, unsigned long, int);
   10.54 +
   10.55 +/* frame_table mapped from dom0 */
   10.56 +frame_table_t * frame_table;
   10.57 +unsigned long frame_table_len;
   10.58 +unsigned long frame_table_pa;
   10.59  
   10.60  static unsigned char readbuf[1204];
   10.61  
   10.62 -static int dom0_read_proc(char *page, char **start, off_t off,
   10.63 +static int cmd_read_proc(char *page, char **start, off_t off,
   10.64                            int count, int *eof, void *data)
   10.65  {
   10.66      strcpy(page, readbuf);
   10.67 @@ -31,97 +68,314 @@ static int dom0_read_proc(char *page, ch
   10.68      return strlen(page);
   10.69  }
   10.70  
   10.71 +static ssize_t ts_read(struct file * file, char * buff, size_t size, loff_t * off)
   10.72 +{
   10.73 +    dom0_op_t op;
   10.74 +    unsigned long addr;
   10.75 +    pgprot_t prot;
   10.76 +    int ret = 0;
   10.77  
   10.78 -static int dom0_write_proc(struct file *file, const char *buffer, 
   10.79 +    /* retrieve domain specific data from proc_dir_entry */
   10.80 +    dom_procdata_t * dom_data = (dom_procdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
   10.81 +    
   10.82 +    /* 
   10.83 +     * get the phys addr of the task struct for the requested
   10.84 +     * domain
   10.85 +     */
   10.86 +    op.cmd = DOM0_MAPTASK;
   10.87 +    op.u.mapdomts.domain = dom_data->domain;
   10.88 +    op.u.mapdomts.ts_phy_addr = -1;
   10.89 +
   10.90 +    ret = HYPERVISOR_dom0_op(&op);
   10.91 +    if(ret != 0)
   10.92 +       return -EAGAIN;
   10.93 +
   10.94 +    prot = PAGE_SHARED; 
   10.95 +
   10.96 +    /* remap the range using xen specific routines */
   10.97 +    addr = direct_mmap(op.u.mapdomts.ts_phy_addr, PAGE_SIZE, prot, 0, 0);
   10.98 +    copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
   10.99 +    dom_data->map_size = PAGE_SIZE;
  10.100 +
  10.101 +    return sizeof(addr);
  10.102 +     
  10.103 +}
  10.104 +
  10.105 +static ssize_t ts_write(struct file * file, const char * buff, size_t size , loff_t * off)
  10.106 +{
  10.107 +    unsigned long addr;
  10.108 +    dom_procdata_t * dom_data = (dom_procdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
  10.109 +    
  10.110 +    copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
  10.111 +    
  10.112 +    if(direct_unmap(addr, dom_data->map_size) == 0){
  10.113 +        return sizeof(addr);
  10.114 +    } else {
  10.115 +        return -1;
  10.116 +    }
  10.117 +}
  10.118 + 
  10.119 +struct file_operations ts_ops = {
  10.120 +    read:   ts_read,
  10.121 +    write:  ts_write,
  10.122 +};
  10.123 +
  10.124 +static void create_proc_dom_entries(int dom)
  10.125 +{
  10.126 +    struct proc_dir_entry * dir;
  10.127 +    struct proc_dir_entry * file;
  10.128 +    dom_procdata_t * dom_data;
  10.129 +    char dir_name[MAX_LEN];
  10.130 +
  10.131 +    snprintf(dir_name, MAX_LEN, "%s%d", DOM_DIR, dom);
  10.132 +
  10.133 +    dom_data = (dom_procdata_t *)kmalloc(GFP_KERNEL, 128);
  10.134 +    dom_data->domain = dom;
  10.135 +
  10.136 +    dir = proc_mkdir(dir_name, xeno_base);
  10.137 +    dir->data = dom_data;
  10.138 +
  10.139 +    file = create_proc_entry(DOM_TS, 0600, dir);
  10.140 +    if(file != NULL)
  10.141 +    {   
  10.142 +        file->owner = THIS_MODULE;
  10.143 +        file->nlink = 1;
  10.144 +        file->proc_fops = &ts_ops;
  10.145 +    
  10.146 +        file->data = dom_data;
  10.147 +    }
  10.148 +}
  10.149 +
  10.150 +static int dom_mem_mmap(struct file *file, struct vm_area_struct *vma)
  10.151 +{
  10.152 +    proc_memdata_t * mem_data = (proc_memdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
  10.153 +
  10.154 +    /*
  10.155 +     * do not dump contents to core file,
  10.156 +     * do not swap out.
  10.157 +     */
  10.158 +    vma->vm_flags |= VM_IO;
  10.159 +    vma->vm_flags |= VM_RESERVED;
  10.160 +
  10.161 +    if(direct_remap_disc_page_range(vma->vm_start, mem_data->pfn, mem_data->tot_pages, 
  10.162 +                            vma->vm_page_prot))
  10.163 +        return -EAGAIN;
  10.164 +    
  10.165 +    return 0;
  10.166 +}
  10.167 +
  10.168 +static ssize_t dom_mem_write(struct file * file, const char * buff, size_t size , 
  10.169 +                loff_t * off)
  10.170 +{
  10.171 +    unsigned long addr;
  10.172 +    proc_memdata_t * mem_data = (proc_memdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
  10.173 +    
  10.174 +    copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
  10.175 +    
  10.176 +    if(direct_disc_unmap(addr, mem_data->pfn, mem_data->tot_pages) == 0){
  10.177 +        return sizeof(addr);
  10.178 +    } else {
  10.179 +        return -1;
  10.180 +    }
  10.181 +}
  10.182 +
  10.183 +static ssize_t dom_mem_read(struct file * file, char * buff, size_t size, loff_t * off)
  10.184 +{
  10.185 +    unsigned long addr;
  10.186 +    pgprot_t prot;
  10.187 +    proc_memdata_t * mem_data = (proc_memdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
  10.188 +
  10.189 +    prot = PAGE_SHARED; 
  10.190 +
  10.191 +    /* remap the range using xen specific routines */
  10.192 +    addr = direct_mmap(mem_data->pfn << PAGE_SHIFT, mem_data->tot_pages << PAGE_SHIFT, prot, 0, 0);
  10.193 +    //addr = direct_mmap(mem_data->pfn, mem_data->tot_pages << PAGE_SHIFT, prot, 1, 
  10.194 +    //                mem_data->tot_pages);
  10.195 +    copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
  10.196 +
  10.197 +    return sizeof(addr);
  10.198 +     
  10.199 +}
  10.200 +
  10.201 +struct file_operations dom_mem_ops = {
  10.202 +    read:    dom_mem_read,
  10.203 +    write:   dom_mem_write,
  10.204 +};
  10.205 +
  10.206 +static int dom_map_mem(unsigned int dom, unsigned long pfn, int tot_pages)
  10.207 +{
  10.208 +    int ret = -ENOENT;
  10.209 +    struct proc_dir_entry * pd = xeno_base->subdir;
  10.210 +    struct proc_dir_entry * file;
  10.211 +    proc_memdata_t * memdata;
  10.212 +
  10.213 +    while(pd != NULL){
  10.214 +        if(((dom_procdata_t *)pd->data)->domain == dom){
  10.215 +
  10.216 +            /* check if there is already an entry for mem and if so
  10.217 +             * remove it.
  10.218 +             */
  10.219 +            remove_proc_entry(DOM_MEM, pd);
  10.220 +
  10.221 +            /* create new entry with parameters describing what to do
  10.222 +             * when it is mmaped.
  10.223 +             */
  10.224 +            file = create_proc_entry(DOM_MEM, 0600, pd);
  10.225 +            if(file != NULL)
  10.226 +            {
  10.227 +                file->owner = THIS_MODULE;
  10.228 +                file->nlink = 1;
  10.229 +                file->proc_fops = &dom_mem_ops;
  10.230 +
  10.231 +                memdata = (proc_memdata_t *)kmalloc(GFP_KERNEL, sizeof(proc_memdata_t));
  10.232 +                memdata->pfn = pfn;
  10.233 +                memdata->tot_pages = tot_pages;
  10.234 +                file->data = memdata;
  10.235 +
  10.236 +                ret = 0;
  10.237 +                break;
  10.238 +            }
  10.239 +            ret = -EAGAIN;
  10.240 +            break;
  10.241 +        }                    
  10.242 +        pd = pd->next;
  10.243 +    }
  10.244 +
  10.245 +    return ret;
  10.246 +}
  10.247 +
  10.248 +/* return dom id stored as data pointer to userspace */
  10.249 +static int dom_id_read_proc(char *page, char **start, off_t off,
  10.250 +                          int count, int *eof, void *data)
  10.251 +{
  10.252 +    char arg[16];
  10.253 +    sprintf(arg, "%d", (int)data);
  10.254 +    strcpy(page, arg);
  10.255 +    remove_proc_entry(DOM0_NEWDOM, xeno_base);
  10.256 +    return sizeof(unsigned int);
  10.257 +}
  10.258 +
  10.259 +static int cmd_write_proc(struct file *file, const char *buffer, 
  10.260                             u_long count, void *data)
  10.261  {
  10.262      dom0_op_t op;
  10.263 -    unsigned char c;
  10.264 -    unsigned int val;
  10.265 -    unsigned char result[20];
  10.266 -    int len = count, ret;
  10.267 +    int ret = 0;
  10.268 +    struct proc_dir_entry * new_dom_id;
  10.269 +    
  10.270 +    copy_from_user(&op, buffer, sizeof(dom0_op_t));
  10.271  
  10.272 -    while ( count )
  10.273 -    {
  10.274 -        c = *buffer++;
  10.275 -        count--;
  10.276 -        val = 0;
  10.277 -        if ( c == 'N' )
  10.278 -        {
  10.279 -            op.cmd = DOM0_NEWDOMAIN;
  10.280 -            while ( count && ((c = *buffer) >= '0') && (c <= '9') )
  10.281 -            {
  10.282 -                val *= 10;
  10.283 -                val += c - '0';
  10.284 -                buffer++; count--;
  10.285 -            }      
  10.286 -            op.u.newdomain.memory_kb = val;
  10.287 -            val = 0;
  10.288 -            if (count && (*buffer == ',')) 
  10.289 -            { 
  10.290 -                buffer++; count--;
  10.291 -                while ( count && ((c = *buffer) >= '0') && (c <= '9') )
  10.292 -                {
  10.293 -                    val *= 10;
  10.294 -                    val += c - '0';
  10.295 -                    buffer++; count--;
  10.296 -                }
  10.297 -            } 
  10.298 -            else 
  10.299 -            {
  10.300 -                val = 1; // default to 1 vif.
  10.301 -            }
  10.302 -            op.u.newdomain.num_vifs = val;
  10.303 -            ret = HYPERVISOR_dom0_op(&op);
  10.304 -        }
  10.305 -        else if ( c == 'K' )
  10.306 -        {
  10.307 -            op.cmd = DOM0_KILLDOMAIN;
  10.308 -            while ( count && ((c = *buffer) >= '0') && (c <= '9') )
  10.309 -            {
  10.310 -                val *= 10;
  10.311 -                val += c - '0';
  10.312 -                buffer++; count--;
  10.313 -            }        
  10.314 -            op.u.killdomain.domain = val;
  10.315 -            ret = HYPERVISOR_dom0_op(&op);
  10.316 -        }
  10.317 -        else
  10.318 -        {
  10.319 -            ret = -ENOSYS;
  10.320 -        }
  10.321 -        
  10.322 -        sprintf(result, "%d\n", ret);
  10.323 -        strcat(readbuf, result);
  10.324 -
  10.325 -        while ( count-- && (*buffer++ != '\n') ) continue;
  10.326 +    /* do some sanity checks */
  10.327 +    if(op.cmd > MAX_CMD){
  10.328 +        ret = -ENOSYS;
  10.329 +        goto out;
  10.330      }
  10.331  
  10.332 -    return len;
  10.333 +    /* is the request intended for hypervisor? */
  10.334 +    if(op.cmd != MAP_DOM_MEM){
  10.335 +        ret = HYPERVISOR_dom0_op(&op);
  10.336 +
  10.337 +        /* if new domain created, create proc entries */
  10.338 +        if(op.cmd == DOM0_NEWDOMAIN){
  10.339 +            create_proc_dom_entries(ret);
  10.340 +
  10.341 +            /* now notify user space of the new domain's id */
  10.342 +            new_dom_id = create_proc_entry(DOM0_NEWDOM, 0600, xeno_base);
  10.343 +            if ( new_dom_id != NULL )
  10.344 +            {
  10.345 +                new_dom_id->owner      = THIS_MODULE;
  10.346 +                new_dom_id->nlink      = 1;
  10.347 +                new_dom_id->read_proc  = dom_id_read_proc; 
  10.348 +                new_dom_id->data       = (void *)ret; 
  10.349 +            }
  10.350 +
  10.351 +        }
  10.352 +
  10.353 +    } else {
  10.354 +
  10.355 +        ret = dom_map_mem(op.u.reqdommem.domain, op.u.reqdommem.start_pfn, 
  10.356 +                        op.u.reqdommem.tot_pages); 
  10.357 +    }
  10.358 +    
  10.359 +out:
  10.360 +    return ret;
  10.361 +    
  10.362  }
  10.363  
  10.364 +static ssize_t ft_write(struct file * file, const char * buff, size_t size , loff_t * off)
  10.365 +{
  10.366 +    unsigned long addr;
  10.367 +    
  10.368 +    copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
  10.369 +    
  10.370 +    if(direct_unmap(addr, frame_table_len) == 0){
  10.371 +        return sizeof(addr);
  10.372 +    } else {
  10.373 +        return -1;
  10.374 +    }
  10.375 +}
  10.376 +
  10.377 +static ssize_t ft_read(struct file * file, char * buff, size_t size, loff_t * off)
  10.378 +{
  10.379 +    unsigned long addr;
  10.380 +    pgprot_t prot;
  10.381 +
  10.382 +    prot = PAGE_SHARED; 
  10.383 +
  10.384 +    /* remap the range using xen specific routines */
  10.385 +    addr = direct_mmap(frame_table_pa, frame_table_len, prot, 0, 0);
  10.386 +    copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
  10.387 +
  10.388 +    return sizeof(addr);
  10.389 +     
  10.390 +}
  10.391 +
  10.392 +struct file_operations ft_ops = {
  10.393 +    read:   ft_read,
  10.394 +    write: ft_write,
  10.395 +};
  10.396  
  10.397  static int __init init_module(void)
  10.398  {
  10.399 +    
  10.400 +    frame_table = (frame_table_t *)start_info.frame_table;
  10.401 +    frame_table_len = start_info.frame_table_len;
  10.402 +    frame_table_pa = start_info.frame_table_pa;
  10.403 +
  10.404 +    /* xeno proc root setup */
  10.405 +    xeno_base = proc_mkdir(XENO_BASE, &proc_root); 
  10.406 +
  10.407 +    /* xeno control interface */
  10.408      *readbuf = '\0';
  10.409 -    proc_dom0 = create_proc_entry ("dom0", 0600, &proc_root);
  10.410 -    if ( proc_dom0 != NULL )
  10.411 +    dom0_cmd_intf = create_proc_entry (DOM0_CMD_INTF, 0600, xeno_base);
  10.412 +    if ( dom0_cmd_intf != NULL )
  10.413      {
  10.414 -        proc_dom0->owner      = THIS_MODULE;
  10.415 -        proc_dom0->nlink      = 1;
  10.416 -        proc_dom0->read_proc  = dom0_read_proc;
  10.417 -        proc_dom0->write_proc = dom0_write_proc;
  10.418 -        printk("Successfully installed domain-0 control interface\n");
  10.419 +        dom0_cmd_intf->owner      = THIS_MODULE;
  10.420 +        dom0_cmd_intf->nlink      = 1;
  10.421 +        dom0_cmd_intf->read_proc  = cmd_read_proc;
  10.422 +        dom0_cmd_intf->write_proc = cmd_write_proc;
  10.423      }
  10.424 +
  10.425 +    /* frame table mapping, to be mmaped */
  10.426 +    proc_ft = create_proc_entry(DOM0_FT, 0600, xeno_base);
  10.427 +    if(proc_ft != NULL)
  10.428 +    {   
  10.429 +        proc_ft->owner = THIS_MODULE;
  10.430 +        proc_ft->nlink = 1;
  10.431 +        proc_ft->proc_fops = &ft_ops;
  10.432 +    }
  10.433 +
  10.434 +    /* set up /proc entries for dom 0 */
  10.435 +    create_proc_dom_entries(0);
  10.436 +    
  10.437      return 0;
  10.438  }
  10.439  
  10.440  
  10.441  static void __exit cleanup_module(void)
  10.442  {
  10.443 -    if ( proc_dom0 == NULL ) return;
  10.444 +    if ( dom0_cmd_intf == NULL ) return;
  10.445      remove_proc_entry("dom0", &proc_root);
  10.446 -    proc_dom0 = NULL;
  10.447 +    dom0_cmd_intf = NULL;
  10.448  }
  10.449  
  10.450  
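Taken together, the new entries give user space a small control surface under /proc/xeno: raw dom0_op_t structures written to dom0_cmd are forwarded to the hypervisor (or handled locally in the MAP_DOM_MEM case), new_dom_id reports the id of the most recently created domain, and the per-domain task_data and mem files hand back addresses that the driver has direct-mapped into the calling process. A rough user-space sequence for building a domain might look like the sketch below; the real builder tool is not part of this changeset, error handling is omitted, and pg_head/tot_pages are assumed to be read out of the mapped task_struct as described for sched.h above:

    /* Sketch: driving /proc/xeno from a hypothetical user-space builder. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include "dom0_ops.h"              /* xenolinux-side definitions below */

    static int build_example_domain(void)
    {
        dom0_op_t op;
        char idbuf[16], path[32];
        unsigned long ts_addr, mem_addr, pg_head = 0, tot_pages = 0;
        int cmd_fd = open("/proc/xeno/dom0_cmd", O_WRONLY);

        /* Create the domain: memory is allocated and its proc entries appear. */
        op.cmd = DOM0_NEWDOMAIN;
        op.u.newdomain.memory_kb = 16 * 1024;
        op.u.newdomain.num_vifs  = 1;
        write(cmd_fd, &op, sizeof(op));

        /* Pick up the id of the domain just created. */
        int id_fd = open("/proc/xeno/new_dom_id", O_RDONLY);
        read(id_fd, idbuf, sizeof(idbuf));
        int dom = atoi(idbuf);

        /* Map the domain's task_struct and read pg_head/tot_pages from it. */
        sprintf(path, "/proc/xeno/dom%d/task_data", dom);
        int ts_fd = open(path, O_RDONLY);
        read(ts_fd, &ts_addr, sizeof(ts_addr));
        /* ... pg_head and tot_pages are read at ts_addr (see sched.h above) ... */

        /* Expose the domain's memory, then read back the direct-mapped address. */
        op.cmd = MAP_DOM_MEM;
        op.u.reqdommem.domain    = dom;
        op.u.reqdommem.start_pfn = pg_head;
        op.u.reqdommem.tot_pages = tot_pages;
        write(cmd_fd, &op, sizeof(op));

        sprintf(path, "/proc/xeno/dom%d/mem", dom);
        int mem_fd = open(path, O_RDONLY);
        read(mem_fd, &mem_addr, sizeof(mem_addr));
        /* mem_addr now addresses the new domain's memory; the image and page
         * tables are built there before issuing DOM0_STARTDOM. */
        return dom;
    }
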
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c	Fri Dec 13 12:09:58 2002 +0000
    11.3 @@ -0,0 +1,351 @@
    11.4 +
    11.5 +#include <linux/slab.h>
    11.6 +#include <linux/mm.h>
    11.7 +#include <linux/mman.h>
    11.8 +#include <linux/swap.h>
    11.9 +#include <linux/smp_lock.h>
   11.10 +#include <linux/swapctl.h>
   11.11 +#include <linux/iobuf.h>
   11.12 +#include <linux/highmem.h>
   11.13 +#include <linux/pagemap.h>
   11.14 +#include <linux/list.h>
   11.15 +
   11.16 +#include <asm/pgalloc.h>
   11.17 +#include <asm/uaccess.h>
   11.18 +#include <asm/tlb.h>
   11.19 +#include <asm/mmu.h>
   11.20 +
   11.21 +#include "hypervisor_defs.h"
   11.22 +
   11.23 +#define MAP_CONT    0
   11.24 +#define MAP_DISCONT 1
   11.25 +
   11.26 +/*
   11.27 + * maps a range of physical memory into the requested pages. the old
    11.28 + * mappings are removed. any references to nonexistent pages result
   11.29 + * in null mappings (currently treated as "copy-on-access")
   11.30 + */
   11.31 +
   11.32 +/* bd240: functions below perform direct mapping to the real physical pages needed for
   11.33 + * mapping various hypervisor specific structures needed in dom0 userspace by various
   11.34 + * management applications such as domain builder etc.
   11.35 + */
   11.36 +
   11.37 +#define direct_set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr) | PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
   11.38 +
   11.39 +#define direct_pte_clear(pteptr) queue_l1_entry_update(__pa(pteptr) | PGREQ_UNCHECKED_UPDATE, 0)
   11.40 +
   11.41 +#define __direct_pte(x) ((pte_t) { (x) } )
   11.42 +#define __direct_mk_pte(page_nr,pgprot) __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
   11.43 +#define direct_mk_pte_phys(physpage, pgprot)   __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
   11.44 +
   11.45 +static inline void forget_pte(pte_t page)
   11.46 +{
   11.47 +    if (!pte_none(page)) {
   11.48 +        printk("forget_pte: old mapping existed!\n");
   11.49 +        BUG();
   11.50 +    }
   11.51 +}
   11.52 +
   11.53 +static inline void direct_remappte_range(pte_t * pte, unsigned long address, unsigned long size,
   11.54 +	unsigned long phys_addr, pgprot_t prot)
   11.55 +{
   11.56 +	unsigned long end;
   11.57 +
   11.58 +	address &= ~PMD_MASK;
   11.59 +	end = address + size;
   11.60 +	if (end > PMD_SIZE)
   11.61 +		end = PMD_SIZE;
   11.62 +	do {
   11.63 +		pte_t oldpage;
   11.64 +		oldpage = ptep_get_and_clear(pte);
   11.65 +        
   11.66 +        printk(KERN_ALERT "bd240 debug: %lx - %lx\n", address, phys_addr);
   11.67 +
   11.68 + 		direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
   11.69 +
   11.70 +		forget_pte(oldpage);
   11.71 +		address += PAGE_SIZE;
   11.72 +		phys_addr += PAGE_SIZE;
   11.73 +		pte++;
   11.74 +	} while (address && (address < end));
   11.75 +}
   11.76 +
   11.77 +static inline int direct_remappmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
   11.78 +	unsigned long phys_addr, pgprot_t prot)
   11.79 +{
   11.80 +	unsigned long end;
   11.81 +
   11.82 +	address &= ~PGDIR_MASK;
   11.83 +	end = address + size;
   11.84 +	if (end > PGDIR_SIZE)
   11.85 +		end = PGDIR_SIZE;
   11.86 +	phys_addr -= address;
   11.87 +	do {
   11.88 +		pte_t * pte = pte_alloc(mm, pmd, address);
   11.89 +		if (!pte)
   11.90 +			return -ENOMEM;
   11.91 +		direct_remappte_range(pte, address, end - address, address + phys_addr, prot);
   11.92 +		address = (address + PMD_SIZE) & PMD_MASK;
   11.93 +		pmd++;
   11.94 +	} while (address && (address < end));
   11.95 +	return 0;
   11.96 +}
   11.97 +
   11.98 +/*  Note: this is only safe if the mm semaphore is held when called. */
   11.99 +int direct_remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
  11.100 +{
  11.101 +	int error = 0;
  11.102 +	pgd_t * dir;
  11.103 +	unsigned long beg = from;
  11.104 +	unsigned long end = from + size;
  11.105 +	struct mm_struct *mm = current->mm;
  11.106 +
  11.107 +	phys_addr -= from;
  11.108 +	dir = pgd_offset(mm, from);
  11.109 +	flush_cache_range(mm, beg, end);
  11.110 +	if (from >= end)
  11.111 +		BUG();
  11.112 +
  11.113 +	spin_lock(&mm->page_table_lock);
  11.114 +	do {
  11.115 +		pmd_t *pmd = pmd_alloc(mm, dir, from);
  11.116 +		error = -ENOMEM;
  11.117 +		if (!pmd)
  11.118 +			break;
  11.119 +		error = direct_remappmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
  11.120 +		if (error)
  11.121 +			break;
  11.122 +		from = (from + PGDIR_SIZE) & PGDIR_MASK;
  11.123 +		dir++;
  11.124 +	} while (from && (from < end));
  11.125 +	spin_unlock(&mm->page_table_lock);
  11.126 +	flush_tlb_range(mm, beg, end);
  11.127 +	return error;
  11.128 +}
  11.129 +
  11.130 +/* 
  11.131 + * used for remapping discontiguous bits of domain's memory, pages to map are
  11.132 + * found from frame table beginning at the given first_pg index
  11.133 + */ 
  11.134 +int direct_remap_disc_page_range(unsigned long from, unsigned long first_pg,
  11.135 +                int tot_pages, pgprot_t prot)
  11.136 +{
  11.137 +    frame_table_t * current_ft;
  11.138 +    unsigned long current_pfn;
  11.139 +    unsigned long start = from;
  11.140 +    int count = 0;
  11.141 +
  11.142 +    current_ft = (frame_table_t *)(frame_table + first_pg);
  11.143 +    current_pfn = first_pg; 
  11.144 +    while(count < tot_pages){
  11.145 +            if(direct_remap_page_range(start, current_pfn << PAGE_SHIFT, PAGE_SIZE, prot))
  11.146 +                goto out;
  11.147 +            start += PAGE_SIZE;
  11.148 +            current_pfn = current_ft->next;
  11.149 +            current_ft = (frame_table_t *)(frame_table + current_pfn);
  11.150 +            count++;
  11.151 +    }
  11.152 +
  11.153 +out:
  11.154 +
  11.155 +    return tot_pages - count;
  11.156 +} 
  11.157 +           
  11.158 +/* below functions replace standard sys_mmap and sys_munmap which are absolutely useless
   11.159 + * for direct memory mapping. direct_zap* functions are minor amendments to the 
  11.160 + * original versions in mm/memory.c. the changes are to enable unmapping of real physical
  11.161 + * addresses.
  11.162 + */
  11.163 +
  11.164 +unsigned long direct_mmap(unsigned long phys_addr, unsigned long size, 
  11.165 +                pgprot_t prot, int flag, int tot_pages)
  11.166 +{
  11.167 +    direct_mmap_node_t * dmmap;
  11.168 +    unsigned long addr;
  11.169 +    int ret = 0;
  11.170 +    
  11.171 +    if(!capable(CAP_SYS_ADMIN)){
  11.172 +        ret = -EPERM;
  11.173 +        goto out;
  11.174 +    }
  11.175 +
  11.176 +    /* get unmapped area invokes xen specific arch_get_unmapped_area */
  11.177 +    addr = get_unmapped_area(NULL, 0, size, 0, 0);
  11.178 +    if(addr & ~PAGE_MASK){
  11.179 +        ret = -ENOMEM;
  11.180 +        goto out;
  11.181 +    }
  11.182 +
  11.183 +    /* add node on the list of directly mapped areas */ 
  11.184 +    dmmap = (direct_mmap_node_t *)kmalloc(GFP_KERNEL, sizeof(direct_mmap_node_t));
  11.185 +    dmmap->addr = addr;
  11.186 +    list_add(&dmmap->list, &current->mm->context.direct_list);
  11.187 +
  11.188 +    /* and perform the mapping */
  11.189 +    if(flag == MAP_DISCONT){
  11.190 +        ret = direct_remap_disc_page_range(addr, phys_addr, tot_pages, prot);
  11.191 +    } else {
  11.192 +        ret = direct_remap_page_range(addr, phys_addr, size, prot);
  11.193 +    }
  11.194 +
  11.195 +    if(ret == 0)
  11.196 +        ret = addr;
  11.197 +
  11.198 +out: 
  11.199 +    return ret;
  11.200 +}
  11.201 +
  11.202 +/* most of the checks, refcnt updates, cache stuff have been thrown out as they are not
  11.203 + * needed
  11.204 + */
  11.205 +static inline int direct_zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, 
  11.206 +                unsigned long size)
  11.207 +{
  11.208 +	unsigned long offset;
  11.209 +	pte_t * ptep;
  11.210 +	int freed = 0;
  11.211 +
  11.212 +	if (pmd_none(*pmd))
  11.213 +		return 0;
  11.214 +	if (pmd_bad(*pmd)) {
  11.215 +		pmd_ERROR(*pmd);
  11.216 +		pmd_clear(pmd);
  11.217 +		return 0;
  11.218 +	}
  11.219 +	ptep = pte_offset(pmd, address);
  11.220 +	offset = address & ~PMD_MASK;
  11.221 +	if (offset + size > PMD_SIZE)
  11.222 +		size = PMD_SIZE - offset;
  11.223 +	size &= PAGE_MASK;
  11.224 +	for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
  11.225 +		pte_t pte = *ptep;
  11.226 +		if (pte_none(pte))
  11.227 +			continue;
  11.228 +		freed ++;
  11.229 +		direct_pte_clear(ptep);
  11.230 +	}
  11.231 +
  11.232 +	return freed;
  11.233 +}
  11.234 +
  11.235 +static inline int direct_zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir, 
  11.236 +                unsigned long address, unsigned long size)
  11.237 +{
  11.238 +	pmd_t * pmd;
  11.239 +	unsigned long end;
  11.240 +	int freed;
  11.241 +
  11.242 +	if (pgd_none(*dir))
  11.243 +		return 0;
  11.244 +	if (pgd_bad(*dir)) {
  11.245 +		pgd_ERROR(*dir);
  11.246 +		pgd_clear(dir);
  11.247 +		return 0;
  11.248 +	}
  11.249 +	pmd = pmd_offset(dir, address);
  11.250 +	end = address + size;
  11.251 +	if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
  11.252 +		end = ((address + PGDIR_SIZE) & PGDIR_MASK);
  11.253 +	freed = 0;
  11.254 +	do {
  11.255 +		freed += direct_zap_pte_range(tlb, pmd, address, end - address);
  11.256 +		address = (address + PMD_SIZE) & PMD_MASK; 
  11.257 +		pmd++;
  11.258 +	} while (address < end);
  11.259 +	return freed;
  11.260 +}
  11.261 +
  11.262 +/*
  11.263 + * remove user pages in a given range.
  11.264 + */
  11.265 +void direct_zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
  11.266 +{
  11.267 +	mmu_gather_t *tlb;
  11.268 +	pgd_t * dir;
  11.269 +	unsigned long start = address, end = address + size;
  11.270 +	int freed = 0;
  11.271 +
  11.272 +	dir = pgd_offset(mm, address);
  11.273 +
  11.274 +	/*
  11.275 +	 * This is a long-lived spinlock. That's fine.
  11.276 +	 * There's no contention, because the page table
  11.277 +	 * lock only protects against kswapd anyway, and
  11.278 +	 * even if kswapd happened to be looking at this
  11.279 +	 * process we _want_ it to get stuck.
  11.280 +	 */
  11.281 +	if (address >= end)
  11.282 +		BUG();
  11.283 +	spin_lock(&mm->page_table_lock);
  11.284 +	flush_cache_range(mm, address, end);
  11.285 +	tlb = tlb_gather_mmu(mm);
  11.286 +
  11.287 +	do {
  11.288 +		freed += direct_zap_pmd_range(tlb, dir, address, end - address);
  11.289 +		address = (address + PGDIR_SIZE) & PGDIR_MASK;
  11.290 +		dir++;
  11.291 +	} while (address && (address < end));
  11.292 +
  11.293 +	/* this will flush any remaining tlb entries */
  11.294 +	tlb_finish_mmu(tlb, start, end);
  11.295 +
  11.296 +    /* decrementing rss removed */
  11.297 +
  11.298 +	spin_unlock(&mm->page_table_lock);
  11.299 +}
  11.300 +
  11.301 +int direct_unmap(unsigned long addr, unsigned long size)
  11.302 +{
  11.303 +    direct_mmap_node_t * node;
  11.304 +    struct list_head * curr;
  11.305 +    struct list_head * direct_list = &current->mm->context.direct_list;    
  11.306 +
  11.307 +    curr = direct_list->next;
  11.308 +    while(curr != direct_list){
  11.309 +        node = list_entry(curr, direct_mmap_node_t, list);
  11.310 +        if(node->addr == addr)
  11.311 +            break;
  11.312 +        curr = curr->next;
  11.313 +    }
  11.314 +
  11.315 +    if(curr == direct_list)
  11.316 +        return -1;
  11.317 +
  11.318 +    list_del(&node->list);
  11.319 +    kfree(node);
  11.320 +
  11.321 +    direct_zap_page_range(current->mm, addr, size);
  11.322 + 
  11.323 +    return 0;
  11.324 +}
  11.325 +
  11.326 +int direct_disc_unmap(unsigned long from, unsigned long first_pg, int tot_pages)
  11.327 +{
  11.328 +    int count = 0;
  11.329 +    direct_mmap_node_t * node;
  11.330 +    struct list_head * curr;
  11.331 +    struct list_head * direct_list = &current->mm->context.direct_list;    
  11.332 +
  11.333 +    curr = direct_list->next;
  11.334 +    while(curr != direct_list){
  11.335 +        node = list_entry(curr, direct_mmap_node_t, list);
  11.336 +        if(node->addr == from)
  11.337 +            break;
  11.338 +        curr = curr->next;
  11.339 +    }
  11.340 +
  11.341 +    if(curr == direct_list)
  11.342 +        return -1;
  11.343 +
  11.344 +    list_del(&node->list);
  11.345 +    kfree(node);
  11.346 +
  11.347 +    while(count < tot_pages){
  11.348 +        direct_zap_page_range(current->mm, from, PAGE_SIZE);
  11.349 +        from += PAGE_SIZE;
  11.350 +        count++;
  11.351 +    }
  11.352 +
  11.353 +    return 0;
  11.354 +} 
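The two unmap entry points above differ only in how the region is described: direct_unmap() zaps one contiguous range in a single call, while direct_disc_unmap() zaps tot_pages pages one PAGE_SIZE at a time starting at 'from' (its first_pg argument is not referenced in this version). A minimal caller sketch follows; the function name, parameters and printk messages are illustrative assumptions, not part of this changeset.

    /* Illustrative only: tear down mappings previously recorded on
     * current->mm->context.direct_list. A real caller would use one
     * form or the other, not both.
     */
    void example_teardown(unsigned long map_addr, unsigned long map_size,
                          unsigned long first_pg, int tot_pages)
    {
        /* contiguous region: one call covers [map_addr, map_addr + map_size) */
        if (direct_unmap(map_addr, map_size) < 0)
            printk("no direct mapping recorded at %08lx\n", map_addr);

        /* discontiguous region: zapped a page at a time */
        if (direct_disc_unmap(map_addr, first_pg, tot_pages) < 0)
            printk("no direct mapping recorded at %08lx\n", map_addr);
    }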
    12.1 --- a/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h	Fri Dec 06 23:41:47 2002 +0000
    12.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h	Fri Dec 13 12:09:58 2002 +0000
    12.3 @@ -1,18 +1,26 @@
    12.4 +
    12.5  /******************************************************************************
    12.6   * dom0_ops.h
    12.7   * 
    12.8   * Process command requests from domain-0 guest OS.
    12.9   * 
   12.10 - * Copyright (c) 2002, K A Fraser
   12.11 + * Copyright (c) 2002, K A Fraser, B Dragovic
   12.12   */
   12.13  
   12.14  #define DOM0_NEWDOMAIN   0
   12.15  #define DOM0_KILLDOMAIN  1
   12.16 +#define DOM0_MAPTASK     2
   12.17 +#define MAP_DOM_MEM      3
   12.18 +#define DOM0_STARTDOM    4
   12.19 +#define MAX_CMD          4
   12.20 +
   12.21 +#define MAX_CMD_LEN     256
   12.22  
   12.23  typedef struct dom0_newdomain_st
   12.24  {
   12.25      unsigned int memory_kb;
   12.26 -    unsigned int num_vifs;
   12.27 +    unsigned int num_vifs;  /* temporary */
   12.28 +    unsigned int domain;    /* filled in on return */
   12.29  } dom0_newdomain_t;
   12.30  
   12.31  typedef struct dom0_killdomain_st
   12.32 @@ -20,6 +28,29 @@ typedef struct dom0_killdomain_st
   12.33      unsigned int domain;
   12.34  } dom0_killdomain_t;
   12.35  
   12.36 +typedef struct dom0_map_ts
   12.37 +{
   12.38 +    unsigned int domain;
   12.39 +    unsigned long ts_phy_addr;
   12.40 +} dom0_tsmap_t;
   12.41 +
   12.42 +typedef struct dom_mem_req 
   12.43 +{
   12.44 +    unsigned int domain;
   12.45 +    unsigned long start_pfn;
   12.46 +    int tot_pages;
   12.47 +} dom_mem_req_t;
   12.48 +
   12.49 +typedef struct domain_launch
   12.50 +{
   12.51 +    unsigned int domain;
   12.52 +    unsigned long l2_pgt_addr;
   12.53 +    unsigned long virt_load_addr;
   12.54 +    unsigned long virt_shinfo_addr;
   12.55 +    unsigned long virt_startinfo_addr;
   12.56 +    char cmd_line[MAX_CMD_LEN];
   12.57 +} dom_meminfo_t;
   12.58 +
   12.59  typedef struct dom0_op_st
   12.60  {
   12.61      unsigned long cmd;
   12.62 @@ -27,6 +58,9 @@ typedef struct dom0_op_st
   12.63      {
   12.64          dom0_newdomain_t newdomain;
   12.65          dom0_killdomain_t killdomain;
   12.66 +        dom0_tsmap_t mapdomts;
   12.67 +        dom_mem_req_t reqdommem;
   12.68      }
   12.69      u;
   12.70  } dom0_op_t;
   12.71 +
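As a minimal usage sketch for the structures above (illustrative only: the values are made up and the path by which the request reaches the hypervisor is outside this hunk), a caller sets cmd and fills in the matching member of the union:

    dom0_op_t op;

    /* create a new domain: memory size in KB plus a (temporary) vif count;
     * op.u.newdomain.domain is the return parameter.
     */
    op.cmd = DOM0_NEWDOMAIN;
    op.u.newdomain.memory_kb = 16 * 1024;
    op.u.newdomain.num_vifs  = 1;

    /* kill an existing domain */
    op.cmd = DOM0_KILLDOMAIN;
    op.u.killdomain.domain = 2;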
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/hypervisor_defs.h	Fri Dec 13 12:09:58 2002 +0000
    13.3 @@ -0,0 +1,33 @@
    13.4 +
    13.5 +/******************************************************************************
    13.6 + * hypervisor_defs.h
    13.7 + * 
    13.8 + * Data structures defined in hypervisor code but needed in DOM0 as well. 
    13.9 + * The contents of this file must be kept in sync with the hypervisor's
   13.10 + * definitions, otherwise something terrible :) will happen.
   13.11 + * 
   13.12 + * Copyright (c) 2002, Keir Fraser & Boris Dragovic 
   13.13 + */
   13.14 +
   13.15 +
   13.16 +/* original version: xen-2.4.16/include/xeno/mm.h */
   13.17 +typedef struct pfn_info {
   13.18 +    struct list_head list;      /* ->mapping has some page lists. */
   13.19 +    unsigned long next;         /* used for threading pages belonging */
   13.20 +    unsigned long prev;         /* to same domain */
   13.21 +    unsigned long flags;        /* atomic flags. */
   13.22 +    unsigned long tot_count;    /* Total domain usage count. */
   13.23 +    unsigned long type_count;   /* pagetable/dir, or domain-writeable refs. */
   13.24 +} frame_table_t;
   13.25 +
   13.26 +extern frame_table_t * frame_table;
   13.27 +
   13.28 +typedef struct proc_data {
   13.29 +    unsigned int domain;
   13.30 +    unsigned long map_size;
   13.31 +} dom_procdata_t;
   13.32 +
   13.33 +typedef struct proc_mem_data {
   13.34 +    unsigned long pfn;
   13.35 +    int tot_pages;
   13.36 +} proc_memdata_t;
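A short sketch of how the mirrored definitions above would be used (an assumption for illustration, not code from this changeset): frame_table is treated as an array of pfn_info entries indexed by frame number, matching the hypervisor header it copies.

    /* Illustrative only: dump the usage counts recorded for one frame. */
    static void show_frame(unsigned long pfn)
    {
        frame_table_t *pf = &frame_table[pfn];

        printk("frame %08lx: flags=%08lx tot=%lu type=%lu\n",
               pfn, pf->flags, pf->tot_count, pf->type_count);
    }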
    14.1 --- a/xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile	Fri Dec 06 23:41:47 2002 +0000
    14.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile	Fri Dec 13 12:09:58 2002 +0000
    14.3 @@ -1,6 +1,7 @@
    14.4 +
    14.5  
    14.6  O_TARGET := mm.o
    14.7  
    14.8 -obj-y	 := init.o fault.o extable.o hypervisor.o
    14.9 +obj-y	 := init.o fault.o extable.o hypervisor.o get_unmapped_area.o mmu_context.o
   14.10  
   14.11  include $(TOPDIR)/Rules.make
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c	Fri Dec 13 12:09:58 2002 +0000
    15.3 @@ -0,0 +1,71 @@
    15.4 +
    15.5 +#include <linux/slab.h>
    15.6 +#include <linux/shm.h>
    15.7 +#include <linux/mman.h>
    15.8 +#include <linux/pagemap.h>
    15.9 +#include <linux/swap.h>
   15.10 +#include <linux/swapctl.h>
   15.11 +#include <linux/smp_lock.h>
   15.12 +#include <linux/init.h>
   15.13 +#include <linux/file.h>
   15.14 +#include <linux/fs.h>
   15.15 +#include <linux/personality.h>
   15.16 +
   15.17 +#include <asm/uaccess.h>
   15.18 +#include <asm/pgalloc.h>
   15.19 +
   15.20 +static int direct_mapped(unsigned long addr)
   15.21 +{
   15.22 +    direct_mmap_node_t * node;
   15.23 +    struct list_head * curr;
   15.24 +    struct list_head * direct_list = &current->mm->context.direct_list;
   15.25 +
   15.26 +    /* This linear scan will slow things down as the list grows; a
   15.27 +     * better data structure than a plain list may be needed here.
   15.28 +     */
   15.29 +    curr = direct_list->next;
   15.30 +    while(curr != direct_list){
   15.31 +        node = list_entry(curr, direct_mmap_node_t, list);
   15.32 +        if(node->addr == addr)
   15.33 +            break;
   15.34 +        curr = curr->next;
   15.35 +    } 
   15.36 +
   15.37 +    if(curr == direct_list)
   15.38 +        return 0;
   15.39 +
   15.40 +    return 1;
   15.41 +}
   15.42 +
   15.43 +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
   15.44 +{
   15.45 +	struct vm_area_struct *vma;
   15.46 +
   15.47 +	if (len > TASK_SIZE)
   15.48 +		return -ENOMEM;
   15.49 +
   15.50 +	if (addr) {
   15.51 +		addr = PAGE_ALIGN(addr);
   15.52 +		vma = find_vma(current->mm, addr);
   15.53 +		if (TASK_SIZE - len >= addr &&
   15.54 +		    (!vma || addr + len <= vma->vm_start))
   15.55 +			return addr;
   15.56 +	}
   15.57 +	addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
   15.58 +
   15.59 +	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
   15.60 +		/* At this point:  (!vma || addr < vma->vm_end). */
   15.61 +		if (TASK_SIZE - len < addr)
   15.62 +			return -ENOMEM;
   15.63 +        
   15.64 +		/* Check whether the gap is large enough and, in addition,
   15.65 +		 * whether the address has already been direct mapped, in which
   15.66 +		 * case it is not available. This extra check is the only
   15.67 +		 * difference from the generic arch_get_unmapped_area().
   15.68 +		 */
   15.69 +		if ((!vma || addr + len <= vma->vm_start) && !direct_mapped(addr))
   15.70 +			return addr;
   15.71 +		
   15.72 +		addr = vma->vm_end;
   15.73 +	}
   15.74 +}
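The search in direct_mapped() is open-coded; the same check can be written with the list_for_each helper from <linux/list.h>. The following is a behaviour-preserving sketch of the same function, shown only as a stylistic alternative:

    static int direct_mapped(unsigned long addr)
    {
        direct_mmap_node_t *node;
        struct list_head *curr;

        list_for_each(curr, &current->mm->context.direct_list) {
            node = list_entry(curr, direct_mmap_node_t, list);
            if (node->addr == addr)
                return 1;
        }
        return 0;
    }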
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c	Fri Dec 13 12:09:58 2002 +0000
    16.3 @@ -0,0 +1,26 @@
    16.4 +
    16.5 +#include <linux/slab.h>
    16.6 +#include <linux/list.h>
    16.7 +
    16.8 +int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
    16.9 +{
   16.10 +    INIT_LIST_HEAD(&mm->context.direct_list);
   16.11 +    return 0;
   16.12 +}
   16.13 +
   16.14 +/* free all elements of the list identifying directly mapped areas */
   16.15 +void destroy_context(struct mm_struct *mm)
   16.16 +{
   16.17 +    direct_mmap_node_t * node;
   16.18 +    struct list_head * curr;
   16.19 +    struct list_head * direct_list = &mm->context.direct_list;
   16.20 +
   16.21 +    curr = direct_list->next;
   16.22 +    while(curr != direct_list){
   16.23 +        node = list_entry(curr, direct_mmap_node_t, list);
   16.24 +        curr = curr->next;
   16.25 +        list_del(&node->list);
   16.26 +        kfree(node);
   16.27 +    }
   16.28 +
   16.29 +}
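destroy_context() advances curr before deleting the node, which is the manual form of the delete-while-iterating pattern. Where list_for_each_safe is available (later 2.4 trees have it; whether this 2.4.16 snapshot does is an assumption), the body could equivalently be written as:

    void destroy_context(struct mm_struct *mm)
    {
        direct_mmap_node_t *node;
        struct list_head *curr, *tmp;

        list_for_each_safe(curr, tmp, &mm->context.direct_list) {
            node = list_entry(curr, direct_mmap_node_t, list);
            list_del(&node->list);
            kfree(node);
        }
    }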
    17.1 --- a/xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h	Fri Dec 06 23:41:47 2002 +0000
    17.2 +++ b/xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h	Fri Dec 13 12:09:58 2002 +0000
    17.3 @@ -1,6 +1,15 @@
    17.4 +
    17.5  #ifndef __i386_MMU_H
    17.6  #define __i386_MMU_H
    17.7  
    17.8 +#include <linux/list.h>
    17.9 +
   17.10 +/* node describing a directly mapped vma */
   17.11 +typedef struct {
   17.12 +    struct list_head list;
   17.13 +    unsigned long addr;
   17.14 +} direct_mmap_node_t;
   17.15 +
   17.16  /*
   17.17   * The i386 doesn't have a mmu context, but
   17.18   * we put the segment information here.
   17.19 @@ -8,6 +17,7 @@
   17.20  typedef struct { 
   17.21  	void *segments;
   17.22  	unsigned long cpuvalid;
   17.23 +	struct list_head direct_list;
   17.24  } mm_context_t;
   17.25  
   17.26  #endif
    18.1 --- a/xenolinux-2.4.16-sparse/include/asm-xeno/mmu_context.h	Fri Dec 06 23:41:47 2002 +0000
    18.2 +++ b/xenolinux-2.4.16-sparse/include/asm-xeno/mmu_context.h	Fri Dec 13 12:09:58 2002 +0000
    18.3 @@ -1,3 +1,4 @@
    18.4 +
    18.5  #ifndef __I386_MMU_CONTEXT_H
    18.6  #define __I386_MMU_CONTEXT_H
    18.7  
    18.8 @@ -9,8 +10,12 @@
    18.9  /*
   18.10   * possibly do the LDT unload here?
   18.11   */
   18.12 -#define destroy_context(mm)		do { } while(0)
   18.13 -#define init_new_context(tsk,mm)	0
   18.14 +
   18.15 +extern int init_new_context(struct task_struct *tsk, struct mm_struct *);
   18.16 +extern void destroy_context(struct mm_struct *);
   18.17 +
   18.18 +//#define destroy_context(mm)		do { } while(0)
   18.19 +//#define init_new_context(tsk,mm)	0
   18.20  
   18.21  #ifdef CONFIG_SMP
   18.22  
    19.1 --- a/xenolinux-2.4.16-sparse/include/asm-xeno/pgtable.h	Fri Dec 06 23:41:47 2002 +0000
    19.2 +++ b/xenolinux-2.4.16-sparse/include/asm-xeno/pgtable.h	Fri Dec 13 12:09:58 2002 +0000
    19.3 @@ -3,6 +3,8 @@
    19.4  
    19.5  #include <linux/config.h>
    19.6  
    19.7 +#define HAVE_ARCH_UNMAPPED_AREA
    19.8 +
    19.9  /*
   19.10   * The Linux memory management assumes a three-level page table setup. On
   19.11   * the i386, we use that, but "fold" the mid level into the top-level page