ia64/xen-unstable

changeset 87:336647fd8f40

bitkeeper revision 1.16 (3e256e80_sVhHRajq5OeU_8MGxwZ_w)

attempt to merge :)
author bd240@boulderdash.cl.cam.ac.uk
date Wed Jan 15 14:21:52 2003 +0000 (2003-01-15)
parents 4a10fe9b20ec 7c48a158429c
children 2ca9cc27dbec
files .rootkeys BitKeeper/etc/logging_ok xen-2.4.16/arch/i386/entry.S xen-2.4.16/common/dom0_ops.c xen-2.4.16/common/domain.c xen-2.4.16/common/memory.c xen-2.4.16/include/hypervisor-ifs/hypervisor-if.h xen-2.4.16/include/xeno/dom0_ops.h xen-2.4.16/include/xeno/sched.h xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/hypervisor_defs.h xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h xenolinux-2.4.16-sparse/include/asm-xeno/mmu_context.h xenolinux-2.4.16-sparse/include/asm-xeno/pgtable.h
line diff
     1.1 --- a/.rootkeys	Wed Jan 15 00:21:24 2003 +0000
     1.2 +++ b/.rootkeys	Wed Jan 15 14:21:52 2003 +0000
     1.3 @@ -248,7 +248,9 @@ 3ddb79b7LLVJBGynxHSOh9A9l97sug xenolinux
     1.4  3ddb79b7UG2QiRAU-Wvc1Y_BLigu1Q xenolinux-2.4.16-sparse/arch/xeno/drivers/console/console.c
     1.5  3ddb79b75eo4PRXkT6Th9popt_SJhg xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile
     1.6  3ddb79b7Xyaoep6U0kLvx6Kx7OauDw xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c
     1.7 +3df9ce13K7qSLBtHV-01QHPW62649Q xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c
     1.8  3ddb79b7PulSkF9m3c7K5MkxHRf4hA xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h
     1.9 +3df9ce13tITy-OuYx_zQemsvqqLTWA xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/hypervisor_defs.h
    1.10  3ddba759XOjcl_OF-52dOYq7sgMykQ xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/vfr.c
    1.11  3ddb79b7s7yYBioHidSkIoHtQxYmOw xenolinux-2.4.16-sparse/arch/xeno/drivers/network/Makefile
    1.12  3ddb79b7CpLL98ScdpbKkVBktlbCtQ xenolinux-2.4.16-sparse/arch/xeno/drivers/network/network.c
    1.13 @@ -284,8 +286,10 @@ 3ddb79b82kQ5oIXpxq3TUmlgxsLzLg xenolinux
    1.14  3ddb79b8qdD_svLCCAja_oP2w4Tn8Q xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile
    1.15  3ddb79b8ukY8dsPYmR8eNk-aCzFPsQ xenolinux-2.4.16-sparse/arch/xeno/mm/extable.c
    1.16  3ddb79b856Zta9b3s0bgUCGbG1blvQ xenolinux-2.4.16-sparse/arch/xeno/mm/fault.c
    1.17 +3df9ce13dZ6UGDjZbUeZfyH4Hy6aCA xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c
    1.18  3ddb79b85fpsKT8A9WYnuJg03b715g xenolinux-2.4.16-sparse/arch/xeno/mm/hypervisor.c
    1.19  3ddb79b83Zj7Xn2QVhU4HeMuAC9FjA xenolinux-2.4.16-sparse/arch/xeno/mm/init.c
    1.20 +3df9ce13TRWIv0Mawm15zESP7jcT7A xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c
    1.21  3ddb79b7aKdTkbr3u6aze8tVwGh_TQ xenolinux-2.4.16-sparse/arch/xeno/vmlinux.lds
    1.22  3ddb79bbx682YH6vR2zbVOXwg73ULg xenolinux-2.4.16-sparse/drivers/block/ll_rw_blk.c
    1.23  3ddb79bcJfHdwrPsjqgI33_OsGdVCg xenolinux-2.4.16-sparse/drivers/block/rd.c
     2.1 --- a/BitKeeper/etc/logging_ok	Wed Jan 15 00:21:24 2003 +0000
     2.2 +++ b/BitKeeper/etc/logging_ok	Wed Jan 15 14:21:52 2003 +0000
     2.3 @@ -1,7 +1,9 @@
     2.4  akw27@boulderdash.cl.cam.ac.uk
     2.5  akw27@labyrinth.cl.cam.ac.uk
     2.6 +bd240@boulderdash.cl.cam.ac.uk
     2.7  kaf24@labyrinth.cl.cam.ac.uk
     2.8  kaf24@plym.cl.cam.ac.uk
     2.9  kaf24@striker.cl.cam.ac.uk
    2.10 +lynx@idefix.cl.cam.ac.uk
    2.11  smh22@boulderdash.cl.cam.ac.uk
    2.12  smh22@uridium.cl.cam.ac.uk
     3.1 --- a/xen-2.4.16/arch/i386/entry.S	Wed Jan 15 00:21:24 2003 +0000
     3.2 +++ b/xen-2.4.16/arch/i386/entry.S	Wed Jan 15 14:21:52 2003 +0000
     3.3 @@ -102,7 +102,7 @@ PROCESSOR       =  0
     3.4  STATE           =  4
     3.5  HYP_EVENTS      =  8
     3.6  DOMAIN          = 12        
     3.7 -SHARED_INFO     = 16
     3.8 +SHARED_INFO     = 24
     3.9  
    3.10  /* Offsets in shared_info_t */
    3.11  EVENTS          =  0
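The SHARED_INFO constant above is maintained by hand and must track the field layout of struct task_struct in xen-2.4.16/include/xeno/sched.h. With pg_head and tot_pages inserted between domain and shared_info (see the sched.h hunk later in this changeset), the shared_info pointer moves from byte offset 16 to 24 on i386. A minimal standalone sketch of the assumed layout, illustrative only and not part of the changeset:

    #include <stddef.h>
    #include <stdio.h>

    /* Mirrors the leading task_struct fields, assuming 4-byte int/long/pointer (i386). */
    struct ts_layout {
        int processor;              /* offset  0 -> PROCESSOR   */
        int state, hyp_events;      /* offsets 4, 8             */
        unsigned int domain;        /* offset 12 -> DOMAIN      */
        unsigned long pg_head;      /* offset 16 (new field)    */
        unsigned int tot_pages;     /* offset 20 (new field)    */
        void *shared_info;          /* offset 24 -> SHARED_INFO */
    };

    int main(void)
    {
        printf("SHARED_INFO = %u\n", (unsigned)offsetof(struct ts_layout, shared_info));
        return 0;
    }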
     4.1 --- a/xen-2.4.16/common/dom0_ops.c	Wed Jan 15 00:21:24 2003 +0000
     4.2 +++ b/xen-2.4.16/common/dom0_ops.c	Wed Jan 15 14:21:52 2003 +0000
     4.3 @@ -1,3 +1,4 @@
     4.4 +
     4.5  /******************************************************************************
     4.6   * dom0_ops.c
     4.7   * 
     4.8 @@ -13,6 +14,7 @@
     4.9  #include <xeno/sched.h>
    4.10  #include <xeno/event.h>
    4.11  
    4.12 +extern unsigned int alloc_new_dom_mem(struct task_struct *, unsigned int);
    4.13  
    4.14  static unsigned int get_domnr(void)
    4.15  {
    4.16 @@ -42,6 +44,21 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    4.17      switch ( op.cmd )
    4.18      {
    4.19  
    4.20 +    case DOM0_STARTDOM:
    4.21 +    {
    4.22 +        struct task_struct * p = find_domain_by_id(op.u.meminfo.domain);
    4.23 +        ret = final_setup_guestos(p, &op.u.meminfo);
    4.24 +        if( ret != 0 ){
    4.25 +            p->state = TASK_DYING;
    4.26 +            release_task(p);
    4.27 +            break;
    4.28 +        }
    4.29 +        wake_up(p);
    4.30 +        reschedule(p);
    4.31 +        ret = p->domain;
    4.32 +    }
    4.33 +    break;
    4.34 +
    4.35      case DOM0_NEWDOMAIN:
    4.36      {
    4.37          struct task_struct *p;
    4.38 @@ -54,6 +71,20 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    4.39          p->domain = dom;
    4.40          pro = (pro+1) % smp_num_cpus;
    4.41          p->processor = pro;
    4.42 +
    4.43 +        /* if we are not booting dom 0 then only mem 
    4.44 +         * needs to be allocated
    4.45 +         */
    4.46 +        if(dom != 0){
    4.47 +            if(alloc_new_dom_mem(p, op.u.newdomain.memory_kb) != 0){
    4.48 +                ret = -1;
    4.49 +                break;
    4.50 +            }
    4.51 +            ret = p->domain;
    4.52 +            break;
    4.53 +        }
    4.54 +
    4.55 +        /* executed only in case of domain 0 */
    4.56          ret = setup_guestos(p, &op.u.newdomain);    /* Load guest OS into @p */
    4.57          if ( ret != 0 ) 
    4.58          {
    4.59 @@ -81,6 +112,16 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    4.60      }
    4.61      break;
    4.62  
    4.63 +    case DOM0_MAPTASK:
    4.64 +    {
    4.65 +        unsigned int dom = op.u.mapdomts.domain;
    4.66 +        
    4.67 +        op.u.mapdomts.ts_phy_addr = __pa(find_domain_by_id(dom));
    4.68 +        copy_to_user(u_dom0_op, &op, sizeof(op));
    4.69 +
    4.70 +    }
    4.71 +    break;
    4.72 +
    4.73      default:
    4.74          ret = -ENOSYS;
    4.75  
     5.1 --- a/xen-2.4.16/common/domain.c	Wed Jan 15 00:21:24 2003 +0000
     5.2 +++ b/xen-2.4.16/common/domain.c	Wed Jan 15 14:21:52 2003 +0000
     5.3 @@ -11,6 +11,15 @@
     5.4  #include <xeno/dom0_ops.h>
     5.5  #include <asm/io.h>
     5.6  #include <asm/domain_page.h>
     5.7 +#include <asm/msr.h>
     5.8 +#include <xeno/multiboot.h>
     5.9 +
    5.10 +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
    5.11 +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
    5.12 +
    5.13 +extern int nr_mods;
    5.14 +extern module_t *mod;
    5.15 +extern unsigned char *cmdline;
    5.16  
    5.17  rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
    5.18  
    5.19 @@ -374,6 +383,124 @@ static unsigned int alloc_new_dom_mem(st
    5.20      return 0;
    5.21  }
    5.22  
    5.23 +int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
    5.24 +{
    5.25 +    l2_pgentry_t * l2tab;
    5.26 +    l1_pgentry_t * l1tab;
    5.27 +    start_info_t * virt_startinfo_addr;
    5.28 +    unsigned long virt_stack_addr;
    5.29 +    unsigned long long time;
    5.30 +    net_ring_t *net_ring;
    5.31 +    char *dst;    // temporary
    5.32 +    int i;        // temporary
    5.33 +
    5.34 +    /* entries 0xe0000000 onwards in page table must contain hypervisor
    5.35 +     * mem mappings - set them up.
    5.36 +     */
    5.37 +    l2tab = (l2_pgentry_t *)__va(meminfo->l2_pgt_addr);
    5.38 +    memcpy(l2tab + DOMAIN_ENTRIES_PER_L2_PAGETABLE, 
    5.39 +        ((l2_pgentry_t *)idle0_pg_table) + DOMAIN_ENTRIES_PER_L2_PAGETABLE, 
    5.40 +        (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t));
    5.41 +    p->mm.pagetable = mk_pagetable((unsigned long)l2tab);
    5.42 +
    5.43 +    /* map in the shared info structure */
    5.44 +    l2tab = pagetable_ptr(p->mm.pagetable) + l2_table_offset(meminfo->virt_shinfo_addr);
    5.45 +    l1tab = l2_pgentry_to_l1(*l2tab) + l1_table_offset(meminfo->virt_shinfo_addr);
    5.46 +    *l1tab = mk_l1_pgentry(__pa(p->shared_info) | L1_PROT);
    5.47 +
    5.48 +    /* set up the shared info structure */
    5.49 +    rdtscll(time);
    5.50 +    p->shared_info->wall_time    = time;
    5.51 +    p->shared_info->domain_time  = time;
    5.52 +    p->shared_info->ticks_per_ms = ticks_per_usec * 1000;
    5.53 +
    5.54 +    /* we pass start info struct to guest os as function parameter on stack */
    5.55 +    virt_startinfo_addr = (start_info_t *)meminfo->virt_startinfo_addr;
    5.56 +    virt_stack_addr = (unsigned long)virt_startinfo_addr;       
    5.57 +
    5.58 +    /* we need to populate start_info struct within the context of the
    5.59 +     * new domain. thus, temporarily install its pagetables.
    5.60 +     */
    5.61 +    __cli();
    5.62 +    __asm__ __volatile__ (
    5.63 +        "mov %%eax, %%cr3"
    5.64 +        : : "a" (__pa(pagetable_ptr(p->mm.pagetable))));
    5.65 +
    5.66 +    memset(virt_startinfo_addr, 0, sizeof(*virt_startinfo_addr));
    5.67 +    virt_startinfo_addr->nr_pages = p->tot_pages;
    5.68 +    virt_startinfo_addr->shared_info = (shared_info_t *)meminfo->virt_shinfo_addr;
    5.69 +    virt_startinfo_addr->pt_base = meminfo->virt_load_addr + 
    5.70 +                    ((p->tot_pages - 1) << PAGE_SHIFT);
    5.71 +
    5.72 +    /* now, this is just temporary before we switch to pseudo phys
    5.73 +     * addressing. this works only for contiguous chunks of memory!!!
    5.74 +     */
    5.75 +    virt_startinfo_addr->phys_base = p->pg_head << PAGE_SHIFT;
    5.76 +    
    5.77 +    /* Add virtual network interfaces and point to them in startinfo. */
    5.78 +    while (meminfo->num_vifs-- > 0) {
    5.79 +        net_ring = create_net_vif(p->domain);
    5.80 +        if (!net_ring) panic("no network ring!\n");
    5.81 +    }
    5.82 +    virt_startinfo_addr->net_rings = p->net_ring_base;
    5.83 +    virt_startinfo_addr->num_net_rings = p->num_net_vifs;
    5.84 +
    5.85 +    /* Add block io interface */
    5.86 +    virt_startinfo_addr->blk_ring = p->blk_ring_base;
    5.87 +
    5.88 +    /* I do not think this has to be done any more; temporary. */
    5.89 +    /* We tell OS about any modules we were given. */
    5.90 +    if ( nr_mods > 1 )
    5.91 +    {
    5.92 +        virt_startinfo_addr->mod_start = 
    5.93 +            (mod[1].mod_start-mod[0].mod_start-12) + meminfo->virt_load_addr;
    5.94 +        virt_startinfo_addr->mod_len = 
    5.95 +            mod[nr_mods-1].mod_end - mod[1].mod_start;
    5.96 +    }
    5.97 +
    5.98 +    /* temporary: meminfo->cmd_line just needs to be copied into start_info */
    5.99 +    dst = virt_startinfo_addr->cmd_line;
   5.100 +    if ( mod[0].string )
   5.101 +    {
   5.102 +        char *modline = (char *)__va(mod[0].string);
   5.103 +        for ( i = 0; i < 255; i++ )
   5.104 +        {
   5.105 +            if ( modline[i] == '\0' ) break;
   5.106 +            *dst++ = modline[i];
   5.107 +        }
   5.108 +    }
   5.109 +    *dst = '\0';
   5.110 +
   5.111 +    if ( opt_nfsroot )
   5.112 +    {
   5.113 +        unsigned char boot[150];
   5.114 +        unsigned char ipbase[20], nfsserv[20], gateway[20], netmask[20];
   5.115 +        unsigned char nfsroot[70];
   5.116 +        snprintf(nfsroot, 70, opt_nfsroot, p->domain); 
   5.117 +        snprintf(boot, sizeof(boot),
   5.118 +                " root=/dev/nfs ip=%s:%s:%s:%s::eth0:off nfsroot=%s",
   5.119 +                 quad_to_str(opt_ipbase + p->domain, ipbase),
   5.120 +                 quad_to_str(opt_nfsserv, nfsserv),
   5.121 +                 quad_to_str(opt_gateway, gateway),
   5.122 +                 quad_to_str(opt_netmask, netmask),
   5.123 +                 nfsroot);
   5.124 +        strcpy(dst, boot);
   5.125 +    }
   5.126 +
   5.127 +    /* Reinstate the caller's page tables. */
   5.128 +    __asm__ __volatile__ (
   5.129 +        "mov %%eax,%%cr3"
   5.130 +        : : "a" (__pa(pagetable_ptr(current->mm.pagetable))));    
   5.131 +    __sti();
   5.132 +
   5.133 +    new_thread(p, 
   5.134 +               (unsigned long)meminfo->virt_load_addr, 
   5.135 +               (unsigned long)virt_stack_addr, 
   5.136 +               (unsigned long)virt_startinfo_addr);
   5.137 +
   5.138 +    return 0;
   5.139 +}
   5.140 +     
   5.141  /*
   5.142   * Initial load map:
   5.143   *  start_address:
   5.144 @@ -389,11 +516,6 @@ static unsigned int alloc_new_dom_mem(st
   5.145   *      <one page>
   5.146   */
   5.147  #define MB_PER_DOMAIN 16
   5.148 -#include <asm/msr.h>
   5.149 -#include <xeno/multiboot.h>
   5.150 -extern int nr_mods;
   5.151 -extern module_t *mod;
   5.152 -extern unsigned char *cmdline;
   5.153  int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
   5.154  {
   5.155  #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
   5.156 @@ -599,6 +721,8 @@ int setup_guestos(struct task_struct *p,
   5.157      virt_startinfo_address->phys_base = start_address;
   5.158      /* NB. Next field will be NULL if dom != 0. */
   5.159      virt_startinfo_address->frame_table = virt_ftable_start_addr;
   5.160 +    virt_startinfo_address->frame_table_len = ft_size;
   5.161 +    virt_startinfo_address->frame_table_pa = __pa(frame_table);
   5.162  
   5.163      /* Add virtual network interfaces and point to them in startinfo. */
   5.164      while (params->num_vifs-- > 0) {
     7.1 --- a/xen-2.4.16/include/hypervisor-ifs/hypervisor-if.h	Wed Jan 15 00:21:24 2003 +0000
     7.2 +++ b/xen-2.4.16/include/hypervisor-ifs/hypervisor-if.h	Wed Jan 15 14:21:52 2003 +0000
     7.3 @@ -201,6 +201,8 @@ typedef struct start_info_st {
     7.4      int num_net_rings;
     7.5      blk_ring_t *blk_ring;         /* block io communication rings */
     7.6      unsigned long frame_table;    /* mapping of the frame_table for dom0 */
     7.7 +    unsigned long frame_table_len;
     7.8 +    unsigned long frame_table_pa; /* frame_table physical address */
     7.9      unsigned char cmd_line[1];    /* variable-length */
    7.10  } start_info_t;
    7.11  
     8.1 --- a/xen-2.4.16/include/xeno/dom0_ops.h	Wed Jan 15 00:21:24 2003 +0000
     8.2 +++ b/xen-2.4.16/include/xeno/dom0_ops.h	Wed Jan 15 14:21:52 2003 +0000
     8.3 @@ -1,9 +1,10 @@
     8.4 +
     8.5  /******************************************************************************
     8.6   * dom0_ops.h
     8.7   * 
     8.8   * Process command requests from domain-0 guest OS.
     8.9   * 
    8.10 - * Copyright (c) 2002, K A Fraser
    8.11 + * Copyright (c) 2002, K A Fraser, B Dragovic
    8.12   */
    8.13  
    8.14  #ifndef __DOM0_OPS_H__
    8.15 @@ -11,11 +12,16 @@
    8.16  
    8.17  #define DOM0_NEWDOMAIN   0
    8.18  #define DOM0_KILLDOMAIN  1
    8.19 +#define DOM0_MAPTASK     2
    8.20 +#define DOM0_STARTDOM    4
    8.21  
    8.22 -typedef struct dom0_newdomain_st
    8.23 +#define MAX_CMD_LEN    256
    8.24 +
    8.25 +typedef struct dom0_newdomain_st 
    8.26  {
    8.27      unsigned int memory_kb;
    8.28 -    unsigned int num_vifs;
    8.29 +    unsigned int num_vifs;  // temporary
    8.30 +    unsigned int domain; 
    8.31  } dom0_newdomain_t;
    8.32  
    8.33  typedef struct dom0_killdomain_st
    8.34 @@ -23,6 +29,23 @@ typedef struct dom0_killdomain_st
    8.35      unsigned int domain;
    8.36  } dom0_killdomain_t;
    8.37  
    8.38 +typedef struct dom0_map_ts
    8.39 +{
    8.40 +    unsigned int domain;
    8.41 +    unsigned long ts_phy_addr;
    8.42 +} dom0_tsmap_t;
    8.43 +
    8.44 +typedef struct domain_launch
    8.45 +{
    8.46 +    unsigned long domain;
    8.47 +    unsigned long l2_pgt_addr;
    8.48 +    unsigned long virt_load_addr;
    8.49 +    unsigned long virt_shinfo_addr;
    8.50 +    unsigned long virt_startinfo_addr;
    8.51 +    unsigned int num_vifs;
    8.52 +    char cmd_line[MAX_CMD_LEN];
    8.53 +} dom_meminfo_t;
    8.54 +
    8.55  typedef struct dom0_op_st
    8.56  {
    8.57      unsigned long cmd;
    8.58 @@ -30,6 +53,8 @@ typedef struct dom0_op_st
    8.59      {
    8.60          dom0_newdomain_t newdomain;
    8.61          dom0_killdomain_t killdomain;
    8.62 +        dom0_tsmap_t mapdomts;
    8.63 +        dom_meminfo_t meminfo;
    8.64      }
    8.65      u;
    8.66  } dom0_op_t;
     9.1 --- a/xen-2.4.16/include/xeno/sched.h	Wed Jan 15 00:21:24 2003 +0000
     9.2 +++ b/xen-2.4.16/include/xeno/sched.h	Wed Jan 15 14:21:52 2003 +0000
     9.3 @@ -1,3 +1,4 @@
     9.4 +
     9.5  #ifndef _LINUX_SCHED_H
     9.6  #define _LINUX_SCHED_H
     9.7  
     9.8 @@ -57,10 +58,19 @@ extern struct mm_struct init_mm;
     9.9  #include <xeno/block.h>
    9.10  
    9.11  struct task_struct {
    9.12 +
    9.13      int processor;
    9.14      int state, hyp_events;
    9.15      unsigned int domain;
    9.16  
    9.17 +    /* index into frame_table threading pages belonging to this
    9.18 +     * domain together. these are placed at the top of the structure
    9.19 +     * to avoid nasty padding for various kernel structs when using
    9.20 +     * task_struct in user space
    9.21 +     */
    9.22 +    unsigned long pg_head;
    9.23 +    unsigned int tot_pages;
    9.24 +
    9.25      /* An unsafe pointer into a shared data area. */
    9.26      shared_info_t *shared_info;
    9.27      
    9.28 @@ -91,13 +101,7 @@ struct task_struct {
    9.29      struct mm_struct *active_mm;
    9.30      struct thread_struct thread;
    9.31      struct task_struct *prev_task, *next_task;
    9.32 -	
    9.33 -    /* index into frame_table threading pages belonging to this
    9.34 -     * domain together
    9.35 -     */
    9.36 -    unsigned long pg_head;
    9.37 -    unsigned int tot_pages;
    9.38 -
    9.39 +    
    9.40      unsigned long flags;
    9.41  };
    9.42  
    9.43 @@ -142,6 +146,7 @@ extern struct task_struct first_task_str
    9.44  
    9.45  extern struct task_struct *do_newdomain(void);
    9.46  extern int setup_guestos(struct task_struct *p, dom0_newdomain_t *params);
    9.47 +extern int final_setup_guestos(struct task_struct *p, dom_meminfo_t *);
    9.48  
    9.49  struct task_struct *find_domain_by_id(unsigned int dom);
    9.50  extern void release_task(struct task_struct *);
    10.1 --- a/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile	Wed Jan 15 00:21:24 2003 +0000
    10.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile	Wed Jan 15 14:21:52 2003 +0000
    10.3 @@ -1,3 +1,3 @@
    10.4  O_TARGET := dom0.o
    10.5 -obj-y := dom0_core.o vfr.o
    10.6 +obj-y := dom0_memory.o dom0_core.o vfr.o
    10.7  include $(TOPDIR)/Rules.make
    11.1 --- a/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c	Wed Jan 15 00:21:24 2003 +0000
    11.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c	Wed Jan 15 14:21:52 2003 +0000
    11.3 @@ -3,7 +3,7 @@
    11.4   * 
    11.5   * Interface to privileged domain-0 commands.
    11.6   * 
    11.7 - * Copyright (c) 2002, K A Fraser
    11.8 + * Copyright (c) 2002, K A Fraser, B Dragovic
    11.9   */
   11.10  
   11.11  #include <linux/config.h>
   11.12 @@ -15,13 +15,49 @@
   11.13  #include <linux/errno.h>
   11.14  #include <linux/proc_fs.h>
   11.15  
   11.16 +#include <linux/mm.h>
   11.17 +#include <linux/mman.h>
   11.18 +#include <linux/swap.h>
   11.19 +#include <linux/smp_lock.h>
   11.20 +#include <linux/swapctl.h>
   11.21 +#include <linux/iobuf.h>
   11.22 +#include <linux/highmem.h>
   11.23 +#include <linux/pagemap.h>
   11.24 +
   11.25 +#include <asm/pgalloc.h>
   11.26 +#include <asm/pgtable.h>
   11.27 +#include <asm/uaccess.h>
   11.28 +#include <asm/tlb.h>
   11.29 +
   11.30  #include "dom0_ops.h"
   11.31 +#include "hypervisor_defs.h"
   11.32  
   11.33 -static struct proc_dir_entry *proc_dom0;
   11.34 +#define XENO_BASE       "xeno"          // proc file name defs should be in separate .h
   11.35 +#define DOM0_CMD_INTF   "dom0_cmd"
   11.36 +#define DOM0_FT         "frame_table"
   11.37 +#define DOM0_NEWDOM     "new_dom_id"
   11.38 +
   11.39 +#define MAX_LEN         16
   11.40 +#define DOM_DIR         "dom"
   11.41 +#define DOM_TS          "task_data"
   11.42 +#define DOM_MEM         "mem"
   11.43 +
   11.44 +static struct proc_dir_entry *xeno_base;
   11.45 +static struct proc_dir_entry *dom0_cmd_intf;
   11.46 +static struct proc_dir_entry *proc_ft;
   11.47 +
   11.48 +unsigned long direct_mmap(unsigned long, unsigned long, pgprot_t, int, int);
   11.49 +int direct_unmap(unsigned long, unsigned long);
   11.50 +int direct_disc_unmap(unsigned long, unsigned long, int);
   11.51 +
   11.52 +/* frame_table mapped from dom0 */
   11.53 +frame_table_t * frame_table;
   11.54 +unsigned long frame_table_len;
   11.55 +unsigned long frame_table_pa;
   11.56  
   11.57  static unsigned char readbuf[1204];
   11.58  
   11.59 -static int dom0_read_proc(char *page, char **start, off_t off,
   11.60 +static int cmd_read_proc(char *page, char **start, off_t off,
   11.61                            int count, int *eof, void *data)
   11.62  {
   11.63      strcpy(page, readbuf);
   11.64 @@ -31,97 +67,303 @@ static int dom0_read_proc(char *page, ch
   11.65      return strlen(page);
   11.66  }
   11.67  
   11.68 +static ssize_t ts_read(struct file * file, char * buff, size_t size, loff_t * off)
   11.69 +{
   11.70 +    dom0_op_t op;
   11.71 +    unsigned long addr;
   11.72 +    pgprot_t prot;
   11.73 +    int ret = 0;
   11.74  
   11.75 -static int dom0_write_proc(struct file *file, const char *buffer, 
   11.76 +    /* retrieve domain specific data from proc_dir_entry */
   11.77 +    dom_procdata_t * dom_data = (dom_procdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
   11.78 +    
   11.79 +    /* 
   11.80 +     * get the phys addr of the task struct for the requested
   11.81 +     * domain
   11.82 +     */
   11.83 +    op.cmd = DOM0_MAPTASK;
   11.84 +    op.u.mapdomts.domain = dom_data->domain;
   11.85 +    op.u.mapdomts.ts_phy_addr = -1;
   11.86 +
   11.87 +    ret = HYPERVISOR_dom0_op(&op);
   11.88 +    if(ret != 0)
   11.89 +       return -EAGAIN;
   11.90 +
   11.91 +    prot = PAGE_SHARED; 
   11.92 +
   11.93 +    /* remap the range using xen specific routines */
   11.94 +    addr = direct_mmap(op.u.mapdomts.ts_phy_addr, PAGE_SIZE, prot, 0, 0);
   11.95 +    copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
   11.96 +    dom_data->map_size = PAGE_SIZE;
   11.97 +
   11.98 +    return sizeof(addr);
   11.99 +     
  11.100 +}
  11.101 +
  11.102 +static ssize_t ts_write(struct file * file, const char * buff, size_t size , loff_t * off)
  11.103 +{
  11.104 +    unsigned long addr;
  11.105 +    dom_procdata_t * dom_data = (dom_procdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
  11.106 +    
  11.107 +    copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
  11.108 +    
  11.109 +    if(direct_unmap(addr, dom_data->map_size) == 0){
  11.110 +        return sizeof(addr);
  11.111 +    } else {
  11.112 +        return -1;
  11.113 +    }
  11.114 +}
  11.115 + 
  11.116 +struct file_operations ts_ops = {
  11.117 +    read:   ts_read,
  11.118 +    write:  ts_write,
  11.119 +};
  11.120 +
  11.121 +static void create_proc_dom_entries(int dom)
  11.122 +{
  11.123 +    struct proc_dir_entry * dir;
  11.124 +    struct proc_dir_entry * file;
  11.125 +    dom_procdata_t * dom_data;
  11.126 +    char dir_name[MAX_LEN];
  11.127 +
  11.128 +    snprintf(dir_name, MAX_LEN, "%s%d", DOM_DIR, dom);
  11.129 +
  11.130 +    dom_data = (dom_procdata_t *)kmalloc(sizeof(dom_procdata_t), GFP_KERNEL);
  11.131 +    dom_data->domain = dom;
  11.132 +
  11.133 +    dir = proc_mkdir(dir_name, xeno_base);
  11.134 +    dir->data = dom_data;
  11.135 +
  11.136 +    file = create_proc_entry(DOM_TS, 0600, dir);
  11.137 +    if(file != NULL)
  11.138 +    {   
  11.139 +        file->owner = THIS_MODULE;
  11.140 +        file->nlink = 1;
  11.141 +        file->proc_fops = &ts_ops;
  11.142 +    
  11.143 +        file->data = dom_data;
  11.144 +    }
  11.145 +}
  11.146 +
  11.147 +static ssize_t dom_mem_write(struct file * file, const char * buff, 
  11.148 +	size_t size , loff_t * off)
  11.149 +{
  11.150 +    unsigned long addr;
  11.151 +    proc_memdata_t * mem_data = (proc_memdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
  11.152 +    
  11.153 +    copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
  11.154 +    
  11.155 +    if(direct_disc_unmap(addr, mem_data->pfn, mem_data->tot_pages) == 0){
  11.156 +        return sizeof(addr);
  11.157 +    } else {
  11.158 +        return -1;
  11.159 +    }
  11.160 +}
  11.161 +
  11.162 +static ssize_t dom_mem_read(struct file * file, char * buff, size_t size, loff_t * off)
  11.163 +{
  11.164 +    unsigned long addr;
  11.165 +    pgprot_t prot;
  11.166 +
  11.167 +    proc_memdata_t * mem_data = (proc_memdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
  11.168 +
  11.169 +    prot = PAGE_SHARED; 
  11.170 +
  11.171 +    /* remap the range using xen specific routines */
  11.172 +
  11.173 +    addr = direct_mmap(mem_data->pfn << PAGE_SHIFT, mem_data->tot_pages << PAGE_SHIFT, prot, 0, 0);
  11.174 +	printk(KERN_ALERT "bd240 debug: dom_mem_read: %lx, %lx @ %lx\n", mem_data->pfn << PAGE_SHIFT, mem_data->tot_pages << PAGE_SHIFT, addr);
  11.175 +
  11.176 +    copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
  11.177 +
  11.178 +	printk(KERN_ALERT "bd240 debug: exiting dom_mem_read\n");
  11.179 +
  11.180 +    return sizeof(addr);
  11.181 +     
  11.182 +}
  11.183 +
  11.184 +struct file_operations dom_mem_ops = {
  11.185 +    read:    dom_mem_read,
  11.186 +    write:   dom_mem_write,
  11.187 +};
  11.188 +
  11.189 +static int dom_map_mem(unsigned int dom, unsigned long pfn, int tot_pages)
  11.190 +{
  11.191 +    int ret = -ENOENT;
  11.192 +    struct proc_dir_entry * pd = xeno_base->subdir;
  11.193 +    struct proc_dir_entry * file;
  11.194 +    proc_memdata_t * memdata;
  11.195 +
  11.196 +    while(pd != NULL){
  11.197 +
  11.198 +        if((pd->mode & S_IFDIR) && ((dom_procdata_t *)pd->data)->domain == dom){
  11.199 +
  11.200 +            /* check if there is already an entry for mem and if so
  11.201 +             * remove it.
  11.202 +             */
  11.203 +            remove_proc_entry(DOM_MEM, pd);
  11.204 +
  11.205 +            /* create new entry with parameters describing what to do
  11.206 +             * when it is mmaped.
  11.207 +             */
  11.208 +            file = create_proc_entry(DOM_MEM, 0600, pd);
  11.209 +            if(file != NULL)
  11.210 +            {
  11.211 +                file->owner = THIS_MODULE;
  11.212 +                file->nlink = 1;
  11.213 +                file->proc_fops = &dom_mem_ops;
  11.214 +
  11.215 +                memdata = (proc_memdata_t *)kmalloc(sizeof(proc_memdata_t), GFP_KERNEL);
  11.216 +                memdata->pfn = pfn;
  11.217 +                memdata->tot_pages = tot_pages;
  11.218 +                file->data = memdata;
  11.219 +
  11.220 +				printk(KERN_ALERT "bd240 debug: cmd setup dom mem: %lx, %d\n", memdata->pfn, memdata->tot_pages);
  11.221 +
  11.222 +                ret = 0;
  11.223 +                break;
  11.224 +            }
  11.225 +            ret = -EAGAIN;
  11.226 +            break;
  11.227 +        }                    
  11.228 +        pd = pd->next;
  11.229 +    }
  11.230 +
  11.231 +    return ret;
  11.232 +}
  11.233 +
  11.234 +/* return dom id stored as data pointer to userspace */
  11.235 +static int dom_id_read_proc(char *page, char **start, off_t off,
  11.236 +                          int count, int *eof, void *data)
  11.237 +{
  11.238 +    char arg[16];
  11.239 +    sprintf(arg, "%d", (int)data);
  11.240 +    strcpy(page, arg);
  11.241 +    remove_proc_entry(DOM0_NEWDOM, xeno_base);
  11.242 +    return sizeof(unsigned int);
  11.243 +}
  11.244 +
  11.245 +static int cmd_write_proc(struct file *file, const char *buffer, 
  11.246                             u_long count, void *data)
  11.247  {
  11.248      dom0_op_t op;
  11.249 -    unsigned char c;
  11.250 -    unsigned int val;
  11.251 -    unsigned char result[20];
  11.252 -    int len = count, ret;
  11.253 +    int ret = 0;
  11.254 +    struct proc_dir_entry * new_dom_id;
  11.255 +    
  11.256 +    copy_from_user(&op, buffer, sizeof(dom0_op_t));
  11.257  
  11.258 -    while ( count )
  11.259 -    {
  11.260 -        c = *buffer++;
  11.261 -        count--;
  11.262 -        val = 0;
  11.263 -        if ( c == 'N' )
  11.264 -        {
  11.265 -            op.cmd = DOM0_NEWDOMAIN;
  11.266 -            while ( count && ((c = *buffer) >= '0') && (c <= '9') )
  11.267 -            {
  11.268 -                val *= 10;
  11.269 -                val += c - '0';
  11.270 -                buffer++; count--;
  11.271 -            }      
  11.272 -            op.u.newdomain.memory_kb = val;
  11.273 -            val = 0;
  11.274 -            if (count && (*buffer == ',')) 
  11.275 -            { 
  11.276 -                buffer++; count--;
  11.277 -                while ( count && ((c = *buffer) >= '0') && (c <= '9') )
  11.278 -                {
  11.279 -                    val *= 10;
  11.280 -                    val += c - '0';
  11.281 -                    buffer++; count--;
  11.282 -                }
  11.283 -            } 
  11.284 -            else 
  11.285 -            {
  11.286 -                val = 1; // default to 1 vif.
  11.287 -            }
  11.288 -            op.u.newdomain.num_vifs = val;
  11.289 -            ret = HYPERVISOR_dom0_op(&op);
  11.290 -        }
  11.291 -        else if ( c == 'K' )
  11.292 -        {
  11.293 -            op.cmd = DOM0_KILLDOMAIN;
  11.294 -            while ( count && ((c = *buffer) >= '0') && (c <= '9') )
  11.295 -            {
  11.296 -                val *= 10;
  11.297 -                val += c - '0';
  11.298 -                buffer++; count--;
  11.299 -            }        
  11.300 -            op.u.killdomain.domain = val;
  11.301 -            ret = HYPERVISOR_dom0_op(&op);
  11.302 -        }
  11.303 -        else
  11.304 -        {
  11.305 -            ret = -ENOSYS;
  11.306 -        }
  11.307 -        
  11.308 -        sprintf(result, "%d\n", ret);
  11.309 -        strcat(readbuf, result);
  11.310 -
  11.311 -        while ( count-- && (*buffer++ != '\n') ) continue;
  11.312 +    /* do some sanity checks */
  11.313 +    if(op.cmd > MAX_CMD){
  11.314 +        ret = -ENOSYS;
  11.315 +        goto out;
  11.316      }
  11.317  
  11.318 -    return len;
  11.319 +    /* is the request intended for hypervisor? */
  11.320 +    if(op.cmd != MAP_DOM_MEM){
  11.321 +        ret = HYPERVISOR_dom0_op(&op);
  11.322 +
  11.323 +        /* if new domain created, create proc entries */
  11.324 +        if(op.cmd == DOM0_NEWDOMAIN){
  11.325 +            create_proc_dom_entries(ret);
  11.326 +
  11.327 +            /* now notify user space of the new domain's id */
  11.328 +            new_dom_id = create_proc_entry(DOM0_NEWDOM, 0600, xeno_base);
  11.329 +            if ( new_dom_id != NULL )
  11.330 +            {
  11.331 +                new_dom_id->owner      = THIS_MODULE;
  11.332 +                new_dom_id->nlink      = 1;
  11.333 +                new_dom_id->read_proc  = dom_id_read_proc; 
  11.334 +                new_dom_id->data       = (void *)ret; 
  11.335 +            }
  11.336 +
  11.337 +        }
  11.338 +
  11.339 +    } else {
  11.340 +
  11.341 +        ret = dom_map_mem(op.u.reqdommem.domain, op.u.reqdommem.start_pfn, 
  11.342 +                        op.u.reqdommem.tot_pages); 
  11.343 +    }
  11.344 +    
  11.345 +out:
  11.346 +    return ret;
  11.347 +    
  11.348  }
  11.349  
  11.350 +static ssize_t ft_write(struct file * file, const char * buff, size_t size , loff_t * off)
  11.351 +{
  11.352 +    unsigned long addr;
  11.353 +    
  11.354 +    copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
  11.355 +    
  11.356 +    if(direct_unmap(addr, frame_table_len) == 0){
  11.357 +        return sizeof(addr);
  11.358 +    } else {
  11.359 +        return -1;
  11.360 +    }
  11.361 +}
  11.362 +
  11.363 +static ssize_t ft_read(struct file * file, char * buff, size_t size, loff_t * off)
  11.364 +{
  11.365 +    unsigned long addr;
  11.366 +    pgprot_t prot;
  11.367 +
  11.368 +    prot = PAGE_SHARED; 
  11.369 +
  11.370 +    /* remap the range using xen specific routines */
  11.371 +    addr = direct_mmap(frame_table_pa, frame_table_len, prot, 0, 0);
  11.372 +    copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
  11.373 +
  11.374 +    return sizeof(addr);
  11.375 +     
  11.376 +}
  11.377 +
  11.378 +struct file_operations ft_ops = {
  11.379 +    read:   ft_read,
  11.380 +    write: ft_write,
  11.381 +};
  11.382  
  11.383  static int __init init_module(void)
  11.384  {
  11.385 +    
  11.386 +    frame_table = (frame_table_t *)start_info.frame_table;
  11.387 +    frame_table_len = start_info.frame_table_len;
  11.388 +    frame_table_pa = start_info.frame_table_pa;
  11.389 +
  11.390 +    /* xeno proc root setup */
  11.391 +    xeno_base = proc_mkdir(XENO_BASE, &proc_root); 
  11.392 +
  11.393 +    /* xeno control interface */
  11.394      *readbuf = '\0';
  11.395 -    proc_dom0 = create_proc_entry ("dom0", 0600, &proc_root);
  11.396 -    if ( proc_dom0 != NULL )
  11.397 +    dom0_cmd_intf = create_proc_entry (DOM0_CMD_INTF, 0600, xeno_base);
  11.398 +    if ( dom0_cmd_intf != NULL )
  11.399      {
  11.400 -        proc_dom0->owner      = THIS_MODULE;
  11.401 -        proc_dom0->nlink      = 1;
  11.402 -        proc_dom0->read_proc  = dom0_read_proc;
  11.403 -        proc_dom0->write_proc = dom0_write_proc;
  11.404 -        printk("Successfully installed domain-0 control interface\n");
  11.405 +        dom0_cmd_intf->owner      = THIS_MODULE;
  11.406 +        dom0_cmd_intf->nlink      = 1;
  11.407 +        dom0_cmd_intf->read_proc  = cmd_read_proc;
  11.408 +        dom0_cmd_intf->write_proc = cmd_write_proc;
  11.409      }
  11.410 +
  11.411 +    /* frame table mapping, to be mmaped */
  11.412 +    proc_ft = create_proc_entry(DOM0_FT, 0600, xeno_base);
  11.413 +    if(proc_ft != NULL)
  11.414 +    {   
  11.415 +        proc_ft->owner = THIS_MODULE;
  11.416 +        proc_ft->nlink = 1;
  11.417 +        proc_ft->proc_fops = &ft_ops;
  11.418 +    }
  11.419 +
  11.420 +    /* set up /proc entries for dom 0 */
  11.421 +    create_proc_dom_entries(0);
  11.422 +
  11.423      return 0;
  11.424  }
  11.425  
  11.426  
  11.427  static void __exit cleanup_module(void)
  11.428  {
  11.429 -    if ( proc_dom0 == NULL ) return;
  11.430 +    if ( dom0_cmd_intf == NULL ) return;
  11.431      remove_proc_entry("dom0", &proc_root);
  11.432 -    proc_dom0 = NULL;
  11.433 +    dom0_cmd_intf = NULL;
  11.434  }
  11.435  
  11.436  
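dom0_core.c replaces the single /proc/dom0 node with a /proc/xeno hierarchy: dom0_cmd accepts a raw dom0_op_t written from userspace, new_dom_id reports the id of the most recently created domain, frame_table hands back the address of a direct mapping of the hypervisor frame table, and per-domain directories (dom<N>/task_data, dom<N>/mem) expose each domain's task struct and memory. A hedged userspace sketch of driving the first stage of domain creation; the tool itself is hypothetical, only the proc path and the dom0_op_t layout come from this changeset:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include "dom0_ops.h"           /* the xenolinux copy of the op structures */

    int main(void)
    {
        dom0_op_t op;
        int fd = open("/proc/xeno/dom0_cmd", O_WRONLY);
        if (fd < 0) { perror("dom0_cmd"); return 1; }

        /* Stage 1: DOM0_NEWDOMAIN now only allocates memory for dom != 0. */
        memset(&op, 0, sizeof(op));
        op.cmd = DOM0_NEWDOMAIN;
        op.u.newdomain.memory_kb = 16 * 1024;
        op.u.newdomain.num_vifs  = 1;
        write(fd, &op, sizeof(op)); /* cmd_write_proc copies a raw dom0_op_t */

        /* The driver then creates /proc/xeno/new_dom_id and /proc/xeno/dom<N>/.
         * A builder would map dom<N>/mem, write the guest image into it, and
         * finally issue DOM0_STARTDOM with a filled-in dom_meminfo_t. */
        close(fd);
        return 0;
    }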
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c	Wed Jan 15 14:21:52 2003 +0000
    12.3 @@ -0,0 +1,367 @@
    12.4 +
    12.5 +#include <linux/slab.h>
    12.6 +#include <linux/mm.h>
    12.7 +#include <linux/mman.h>
    12.8 +#include <linux/swap.h>
    12.9 +#include <linux/smp_lock.h>
   12.10 +#include <linux/swapctl.h>
   12.11 +#include <linux/iobuf.h>
   12.12 +#include <linux/highmem.h>
   12.13 +#include <linux/pagemap.h>
   12.14 +#include <linux/list.h>
   12.15 +
   12.16 +#include <asm/pgalloc.h>
   12.17 +#include <asm/uaccess.h>
   12.18 +#include <asm/tlb.h>
   12.19 +#include <asm/mmu.h>
   12.20 +
   12.21 +#include "hypervisor_defs.h"
   12.22 +
   12.23 +#define MAP_CONT    0
   12.24 +#define MAP_DISCONT 1
   12.25 +
   12.26 +extern struct list_head * find_direct(struct list_head *, unsigned long);
   12.27 +
   12.28 +/* bd240: the functions below directly map the real physical pages backing various
   12.29 + * hypervisor-specific structures into dom0 userspace, where they are needed by
   12.30 + * management applications such as the domain builder.
   12.31 + */
   12.32 +
   12.33 +#define direct_set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr) | PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
   12.34 +
   12.35 +#define direct_pte_clear(pteptr) queue_l1_entry_update(__pa(pteptr) | PGREQ_UNCHECKED_UPDATE, 0)
   12.36 +
   12.37 +#define __direct_pte(x) ((pte_t) { (x) } )
   12.38 +#define __direct_mk_pte(page_nr,pgprot) __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
   12.39 +#define direct_mk_pte_phys(physpage, pgprot)   __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
   12.40 +
   12.41 +static inline void forget_pte(pte_t page)
   12.42 +{
   12.43 +    if (!pte_none(page)) {
   12.44 +        printk("forget_pte: old mapping existed!\n");
   12.45 +        BUG();
   12.46 +    }
   12.47 +}
   12.48 +
   12.49 +static inline void direct_remappte_range(pte_t * pte, unsigned long address, unsigned long size,
   12.50 +	unsigned long phys_addr, pgprot_t prot)
   12.51 +{
   12.52 +	unsigned long end;
   12.53 +
   12.54 +	address &= ~PMD_MASK;
   12.55 +	end = address + size;
   12.56 +	if (end > PMD_SIZE)
   12.57 +		end = PMD_SIZE;
   12.58 +	do {
   12.59 +		pte_t oldpage;
   12.60 +		oldpage = ptep_get_and_clear(pte);
   12.61 +    /*    
   12.62 +		printk(KERN_ALERT "bd240 debug: %lx - %lx\n", pte, phys_addr);
   12.63 +	*/
   12.64 + 		direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
   12.65 +
   12.66 +		forget_pte(oldpage);
   12.67 +		address += PAGE_SIZE;
   12.68 +		phys_addr += PAGE_SIZE;
   12.69 +		pte++;
   12.70 +	} while (address && (address < end));
   12.71 +
   12.72 +}
   12.73 +
   12.74 +static inline int direct_remappmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
   12.75 +	unsigned long phys_addr, pgprot_t prot)
   12.76 +{
   12.77 +	unsigned long end;
   12.78 +
   12.79 +	address &= ~PGDIR_MASK;
   12.80 +	end = address + size;
   12.81 +	if (end > PGDIR_SIZE)
   12.82 +		end = PGDIR_SIZE;
   12.83 +	phys_addr -= address;
   12.84 +	do {
   12.85 +		pte_t * pte = pte_alloc(mm, pmd, address);
   12.86 +		if (!pte)
   12.87 +			return -ENOMEM;
   12.88 +		direct_remappte_range(pte, address, end - address, address + phys_addr, prot);
   12.89 +		address = (address + PMD_SIZE) & PMD_MASK;
   12.90 +		pmd++;
   12.91 +	} while (address && (address < end));
   12.92 +	return 0;
   12.93 +}
   12.94 +
   12.95 +/*  Note: this is only safe if the mm semaphore is held when called. */
   12.96 +int direct_remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
   12.97 +{
   12.98 +	int error = 0;
   12.99 +	pgd_t * dir;
  12.100 +	unsigned long beg = from;
  12.101 +	unsigned long end = from + size;
  12.102 +	struct mm_struct *mm = current->mm;
  12.103 +
  12.104 +	phys_addr -= from;
  12.105 +	dir = pgd_offset(mm, from);
  12.106 +	flush_cache_range(mm, beg, end);
  12.107 +	if (from >= end)
  12.108 +		BUG();
  12.109 +
  12.110 +	spin_lock(&mm->page_table_lock);
  12.111 +	do {
  12.112 +		pmd_t *pmd = pmd_alloc(mm, dir, from);
  12.113 +		error = -ENOMEM;
  12.114 +		if (!pmd)
  12.115 +			break;
  12.116 +		error = direct_remappmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
  12.117 +		if (error)
  12.118 +			break;
  12.119 +		from = (from + PGDIR_SIZE) & PGDIR_MASK;
  12.120 +		dir++;
  12.121 +	} while (from && (from < end));
  12.122 +	spin_unlock(&mm->page_table_lock);
  12.123 +	flush_tlb_range(mm, beg, end);
  12.124 +	return error;
  12.125 +}
  12.126 +
  12.127 +/* 
  12.128 + * used for remapping discontiguous bits of domain's memory, pages to map are
  12.129 + * found from frame table beginning at the given first_pg index
  12.130 + */ 
  12.131 +int direct_remap_disc_page_range(unsigned long from, unsigned long first_pg,
  12.132 +                int tot_pages, pgprot_t prot)
  12.133 +{
  12.134 +    frame_table_t * current_ft;
  12.135 +    unsigned long current_pfn;
  12.136 +    unsigned long start = from;
  12.137 +    int count = 0;
  12.138 +
  12.139 +    current_ft = (frame_table_t *)(frame_table + first_pg);
  12.140 +    current_pfn = first_pg; 
  12.141 +    while(count < tot_pages){
  12.142 +            if(direct_remap_page_range(start, current_pfn << PAGE_SHIFT, PAGE_SIZE, prot))
  12.143 +                goto out;
  12.144 +            start += PAGE_SIZE;
  12.145 +            current_pfn = current_ft->next;
  12.146 +            current_ft = (frame_table_t *)(frame_table + current_pfn);
  12.147 +            count++;
  12.148 +    }
  12.149 +
  12.150 +out:
  12.151 +
  12.152 +    return tot_pages - count;
  12.153 +} 
  12.154 +           
  12.155 +/* The functions below replace the standard sys_mmap and sys_munmap, which are unsuitable
  12.156 + * for direct memory mapping. The direct_zap* functions are minor amendments to the
  12.157 + * original versions in mm/memory.c; the changes enable unmapping of real physical
  12.158 + * addresses.
  12.159 + */
  12.160 +
  12.161 +unsigned long direct_mmap(unsigned long phys_addr, unsigned long size, 
  12.162 +                pgprot_t prot, int flag, int tot_pages)
  12.163 +{
  12.164 +    direct_mmap_node_t * dmmap;
  12.165 +    struct list_head * entry;
  12.166 +    unsigned long addr;
  12.167 +    int ret = 0;
  12.168 +    
  12.169 +    if(!capable(CAP_SYS_ADMIN)){
  12.170 +        ret = -EPERM;
  12.171 +        goto out;
  12.172 +    }
  12.173 +
  12.174 +    /* get unmapped area invokes xen specific arch_get_unmapped_area */
  12.175 +    addr = get_unmapped_area(NULL, 0, size, 0, 0);
  12.176 +    if(addr & ~PAGE_MASK){
  12.177 +        ret = -ENOMEM;
  12.178 +        goto out;
  12.179 +    }
  12.180 +
  12.181 +    /* add node on the list of directly mapped areas, make sure the
  12.182 +	 * list remains sorted.
  12.183 +	 */ 
  12.184 +    dmmap = (direct_mmap_node_t *)kmalloc(sizeof(direct_mmap_node_t), GFP_KERNEL);
  12.185 +    dmmap->vm_start = addr;
  12.186 +    dmmap->vm_end = addr + size;
  12.187 +	entry = find_direct(&current->mm->context.direct_list, addr);
  12.188 +	if(entry != &current->mm->context.direct_list){
  12.189 +		list_add_tail(&dmmap->list, entry);
  12.190 +		printk(KERN_ALERT "bd240 debug: added node %lx, size %lx in the middle\n", dmmap->vm_start, size);
  12.191 +	} else {
  12.192 +    	list_add_tail(&dmmap->list, &current->mm->context.direct_list);
  12.193 +		printk(KERN_ALERT "bd240 debug: added node %lx, size %lx at tail\n", dmmap->vm_start, size);
  12.194 +	}
  12.195 +
  12.196 +    /* and perform the mapping */
  12.197 +    if(flag == MAP_DISCONT){
  12.198 +        ret = direct_remap_disc_page_range(addr, phys_addr, tot_pages, prot);
  12.199 +    } else {
  12.200 +        ret = direct_remap_page_range(addr, phys_addr, size, prot);
  12.201 +    }
  12.202 +
  12.203 +    if(ret == 0)
  12.204 +        ret = addr;
  12.205 +
  12.206 +out: 
  12.207 +    return ret;
  12.208 +}
  12.209 +
  12.210 +/* most of the checks, refcnt updates, cache stuff have been thrown out as they are not
  12.211 + * needed
  12.212 + */
  12.213 +static inline int direct_zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, 
  12.214 +                unsigned long size)
  12.215 +{
  12.216 +	unsigned long offset;
  12.217 +	pte_t * ptep;
  12.218 +	int freed = 0;
  12.219 +
  12.220 +	if (pmd_none(*pmd))
  12.221 +		return 0;
  12.222 +	if (pmd_bad(*pmd)) {
  12.223 +		pmd_ERROR(*pmd);
  12.224 +		pmd_clear(pmd);
  12.225 +		return 0;
  12.226 +	}
  12.227 +	ptep = pte_offset(pmd, address);
  12.228 +	offset = address & ~PMD_MASK;
  12.229 +	if (offset + size > PMD_SIZE)
  12.230 +		size = PMD_SIZE - offset;
  12.231 +	size &= PAGE_MASK;
  12.232 +	for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
  12.233 +		pte_t pte = *ptep;
  12.234 +		if (pte_none(pte))
  12.235 +			continue;
  12.236 +		freed ++;
  12.237 +		direct_pte_clear(ptep);
  12.238 +	}
  12.239 +
  12.240 +	return freed;
  12.241 +}
  12.242 +
  12.243 +static inline int direct_zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir, 
  12.244 +                unsigned long address, unsigned long size)
  12.245 +{
  12.246 +	pmd_t * pmd;
  12.247 +	unsigned long end;
  12.248 +	int freed;
  12.249 +
  12.250 +	if (pgd_none(*dir))
  12.251 +		return 0;
  12.252 +	if (pgd_bad(*dir)) {
  12.253 +		pgd_ERROR(*dir);
  12.254 +		pgd_clear(dir);
  12.255 +		return 0;
  12.256 +	}
  12.257 +	pmd = pmd_offset(dir, address);
  12.258 +	end = address + size;
  12.259 +	if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
  12.260 +		end = ((address + PGDIR_SIZE) & PGDIR_MASK);
  12.261 +	freed = 0;
  12.262 +	do {
  12.263 +		freed += direct_zap_pte_range(tlb, pmd, address, end - address);
  12.264 +		address = (address + PMD_SIZE) & PMD_MASK; 
  12.265 +		pmd++;
  12.266 +	} while (address < end);
  12.267 +	return freed;
  12.268 +}
  12.269 +
  12.270 +/*
  12.271 + * remove user pages in a given range.
  12.272 + */
  12.273 +void direct_zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
  12.274 +{
  12.275 +	mmu_gather_t *tlb;
  12.276 +	pgd_t * dir;
  12.277 +	unsigned long start = address, end = address + size;
  12.278 +	int freed = 0;
  12.279 +
  12.280 +	dir = pgd_offset(mm, address);
  12.281 +
  12.282 +	/*
  12.283 +	 * This is a long-lived spinlock. That's fine.
  12.284 +	 * There's no contention, because the page table
  12.285 +	 * lock only protects against kswapd anyway, and
  12.286 +	 * even if kswapd happened to be looking at this
  12.287 +	 * process we _want_ it to get stuck.
  12.288 +	 */
  12.289 +	if (address >= end)
  12.290 +		BUG();
  12.291 +	spin_lock(&mm->page_table_lock);
  12.292 +	flush_cache_range(mm, address, end);
  12.293 +	tlb = tlb_gather_mmu(mm);
  12.294 +
  12.295 +	do {
  12.296 +		freed += direct_zap_pmd_range(tlb, dir, address, end - address);
  12.297 +		address = (address + PGDIR_SIZE) & PGDIR_MASK;
  12.298 +		dir++;
  12.299 +	} while (address && (address < end));
  12.300 +
  12.301 +	/* this will flush any remaining tlb entries */
  12.302 +	tlb_finish_mmu(tlb, start, end);
  12.303 +
  12.304 +    /* decrementing rss removed */
  12.305 +
  12.306 +	spin_unlock(&mm->page_table_lock);
  12.307 +}
  12.308 +
  12.309 +int direct_unmap(unsigned long addr, unsigned long size)
  12.310 +{
  12.311 +    direct_mmap_node_t * node;
  12.312 +    struct list_head * curr;
  12.313 +    struct list_head * direct_list = &current->mm->context.direct_list;    
  12.314 +
  12.315 +    curr = direct_list->next;
  12.316 +    while(curr != direct_list){
  12.317 +        node = list_entry(curr, direct_mmap_node_t, list);
  12.318 +        if(node->vm_start == addr)
  12.319 +            break;
  12.320 +        curr = curr->next;
  12.321 +    }
  12.322 +
  12.323 +    if(curr == direct_list)
  12.324 +        return -1;
  12.325 +
  12.326 +    list_del(&node->list);
  12.327 +	printk(KERN_ALERT "bd240 debug: delisted %lx from dlist\n", node->vm_start);
  12.328 +    kfree(node);
  12.329 +
  12.330 +    direct_zap_page_range(current->mm, addr, size);
  12.331 + 
  12.332 +    return 0;
  12.333 +}
  12.334 +
  12.335 +int direct_disc_unmap(unsigned long from, unsigned long first_pg, int tot_pages)
  12.336 +{
  12.337 +    int count = 0;
  12.338 +    direct_mmap_node_t * node;
  12.339 +    struct list_head * curr;
  12.340 +    struct list_head * direct_list = &current->mm->context.direct_list;    
  12.341 +
  12.342 +	printk(KERN_ALERT "bd240 debug: direct_disc_unmap\n");
  12.343 +
  12.344 +    curr = direct_list->next;
  12.345 +    while(curr != direct_list){
  12.346 +        node = list_entry(curr, direct_mmap_node_t, list);
  12.347 +
  12.348 +        if(node->vm_start == from)
  12.349 +            break;
  12.350 +        curr = curr->next;
  12.351 +    }
  12.352 +
  12.353 +    if(curr == direct_list)
  12.354 +        return -1;
  12.355 +
  12.356 +	printk(KERN_ALERT "bd240 debug: direct_disc_unmap, deleted from direct_list\n");
  12.357 +
  12.358 +    list_del(&node->list);
  12.359 +    kfree(node);
  12.360 +
  12.361 +	printk(KERN_ALERT "bd240 debug: direct_disc_unmap, from %lx, tot_pages %d\n", from, tot_pages);
  12.362 +
  12.363 +    while(count < tot_pages){
  12.364 +            direct_zap_page_range(current->mm, from, PAGE_SIZE);
  12.365 +            from += PAGE_SIZE;
  12.366 +            count++;
  12.367 +    }
  12.368 +
  12.369 +    return 0;
  12.370 +} 
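direct_mmap() keeps current->mm->context.direct_list sorted by vm_start: find_direct() (in mm/get_unmapped_area.c) returns the first node whose vm_start is greater than or equal to the new start address, and list_add_tail() inserts the new node immediately before it, so arch_get_unmapped_area() can later scan the reserved ranges in address order. A standalone sketch of this insert-before-successor pattern, using a plain singly linked list instead of the kernel list API:

    #include <stdio.h>
    #include <stdlib.h>

    struct dnode { unsigned long vm_start, vm_end; struct dnode *next; };

    /* Insert [start, end) before the first node with vm_start >= start. */
    static void insert_sorted(struct dnode **head, unsigned long start, unsigned long end)
    {
        struct dnode **pp = head;
        struct dnode *n;

        while (*pp && (*pp)->vm_start < start)
            pp = &(*pp)->next;
        n = malloc(sizeof(*n));
        n->vm_start = start;
        n->vm_end = end;
        n->next = *pp;
        *pp = n;
    }

    int main(void)
    {
        struct dnode *list = NULL, *n;
        insert_sorted(&list, 0x3000, 0x4000);
        insert_sorted(&list, 0x1000, 0x2000);   /* lands before the first node */
        for (n = list; n; n = n->next)
            printf("%lx-%lx\n", n->vm_start, n->vm_end);
        return 0;
    }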
    13.1 --- a/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h	Wed Jan 15 00:21:24 2003 +0000
    13.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h	Wed Jan 15 14:21:52 2003 +0000
    13.3 @@ -1,18 +1,26 @@
    13.4 +
    13.5  /******************************************************************************
    13.6   * dom0_ops.h
    13.7   * 
    13.8   * Process command requests from domain-0 guest OS.
    13.9   * 
   13.10 - * Copyright (c) 2002, K A Fraser
   13.11 + * Copyright (c) 2002, K A Fraser, B Dragovic
   13.12   */
   13.13  
   13.14  #define DOM0_NEWDOMAIN   0
   13.15  #define DOM0_KILLDOMAIN  1
   13.16 +#define DOM0_MAPTASK     2
   13.17 +#define MAP_DOM_MEM      3
   13.18 +#define DOM0_STARTDOM    4
   13.19 +#define MAX_CMD          4
   13.20 +
   13.21 +#define MAX_CMD_LEN     256
   13.22  
   13.23  typedef struct dom0_newdomain_st
   13.24  {
   13.25      unsigned int memory_kb;
   13.26 -    unsigned int num_vifs;
   13.27 +    unsigned int num_vifs;  // temporary
   13.28 +    unsigned int domain;    // return parameter
   13.29  } dom0_newdomain_t;
   13.30  
   13.31  typedef struct dom0_killdomain_st
   13.32 @@ -20,6 +28,29 @@ typedef struct dom0_killdomain_st
   13.33      unsigned int domain;
   13.34  } dom0_killdomain_t;
   13.35  
   13.36 +typedef struct dom0_map_ts
   13.37 +{
   13.38 +    unsigned int domain;
   13.39 +    unsigned long ts_phy_addr;
   13.40 +} dom0_tsmap_t;
   13.41 +
   13.42 +typedef struct dom_mem_req 
   13.43 +{
   13.44 +    unsigned int domain;
   13.45 +    unsigned long start_pfn;
   13.46 +    int tot_pages;
   13.47 +} dom_mem_req_t;
   13.48 +
   13.49 +typedef struct domain_launch
   13.50 +{
   13.51 +    unsigned int domain;
   13.52 +    unsigned long l2_pgt_addr;
   13.53 +    unsigned long virt_load_addr;
   13.54 +    unsigned long virt_shinfo_addr;
   13.55 +    unsigned long virt_startinfo_addr;
   13.56 +    char cmd_line[MAX_CMD_LEN];
   13.57 +} dom_meminfo_t;
   13.58 +
   13.59  typedef struct dom0_op_st
   13.60  {
   13.61      unsigned long cmd;
   13.62 @@ -27,6 +58,9 @@ typedef struct dom0_op_st
   13.63      {
   13.64          dom0_newdomain_t newdomain;
   13.65          dom0_killdomain_t killdomain;
   13.66 +        dom0_tsmap_t mapdomts;
   13.67 +        dom_mem_req_t reqdommem;
   13.68      }
   13.69      u;
   13.70  } dom0_op_t;
   13.71 +
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/hypervisor_defs.h	Wed Jan 15 14:21:52 2003 +0000
    14.3 @@ -0,0 +1,33 @@
    14.4 +
    14.5 +/******************************************************************************
    14.6 + * hypervisor_defs.h
    14.7 + * 
    14.8 + * Data structures defined in hypervisor code but needed in DOM0 as well. 
    14.9 + * Contents of this file should be kept in sync with the hypervisor ones
   14.10 + * unless you want something terrible :) to happen. 
   14.11 + * 
   14.12 + * Copyright (c) 2002, Keir Fraser & Boris Dragovic 
   14.13 + */
   14.14 +
   14.15 +
   14.16 +/* original version: xen-2.4.16/include/xeno/mm.h */
   14.17 +typedef struct pfn_info {
   14.18 +    struct list_head list;      /* ->mapping has some page lists. */
   14.19 +    unsigned long next;         /* used for threading pages belonging */
   14.20 +    unsigned long prev;         /* to same domain */
   14.21 +    unsigned long flags;        /* atomic flags. */
   14.22 +    unsigned long tot_count;    /* Total domain usage count. */
   14.23 +    unsigned long type_count;   /* pagetable/dir, or domain-writeable refs. */
   14.24 +} frame_table_t;
   14.25 +
   14.26 +extern frame_table_t * frame_table;
   14.27 +
   14.28 +typedef struct proc_data {
   14.29 +    unsigned int domain;
   14.30 +    unsigned long map_size;
   14.31 +} dom_procdata_t;
   14.32 +
   14.33 +typedef struct proc_mem_data {
   14.34 +    unsigned long pfn;
   14.35 +    int tot_pages;
   14.36 +} proc_memdata_t;
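hypervisor_defs.h duplicates the hypervisor's pfn_info layout so that dom0, once it has mapped the frame table via /proc/xeno/frame_table, can follow the next links that thread together all frames owned by one domain; this is the same walk direct_remap_disc_page_range() performs in dom0_memory.c. A minimal illustrative walker, assuming the frame table is already mapped and that first_pfn/tot_pages come from the per-domain mem data; it is not part of the changeset:

    /* Visit each machine frame in a domain's chain, in allocation order. */
    static void walk_domain_frames(frame_table_t *ft, unsigned long first_pfn,
                                   int tot_pages, void (*visit)(unsigned long pfn))
    {
        unsigned long pfn = first_pfn;
        int i;

        for (i = 0; i < tot_pages; i++) {
            visit(pfn);             /* e.g. remap this frame into dom0         */
            pfn = ft[pfn].next;     /* next frame belonging to the same domain */
        }
    }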
    15.1 --- a/xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile	Wed Jan 15 00:21:24 2003 +0000
    15.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile	Wed Jan 15 14:21:52 2003 +0000
    15.3 @@ -1,6 +1,7 @@
    15.4 +
    15.5  
    15.6  O_TARGET := mm.o
    15.7  
    15.8 -obj-y	 := init.o fault.o extable.o hypervisor.o
    15.9 +obj-y	 := init.o fault.o extable.o hypervisor.o get_unmapped_area.o mmu_context.o
   15.10  
   15.11  include $(TOPDIR)/Rules.make
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c	Wed Jan 15 14:21:52 2003 +0000
    16.3 @@ -0,0 +1,145 @@
    16.4 +
    16.5 +#include <linux/slab.h>
    16.6 +#include <linux/shm.h>
    16.7 +#include <linux/mman.h>
    16.8 +#include <linux/pagemap.h>
    16.9 +#include <linux/swap.h>
   16.10 +#include <linux/swapctl.h>
   16.11 +#include <linux/smp_lock.h>
   16.12 +#include <linux/init.h>
   16.13 +#include <linux/file.h>
   16.14 +#include <linux/fs.h>
   16.15 +#include <linux/personality.h>
   16.16 +
   16.17 +#include <asm/uaccess.h>
   16.18 +#include <asm/pgalloc.h>
   16.19 +
   16.20 +/*
   16.21 +static int direct_mapped(unsigned long addr)
   16.22 +{
   16.23 +    direct_mmap_node_t * node;
   16.24 +    struct list_head * curr;
   16.25 +    struct list_head * direct_list = &current->mm->context.direct_list;
   16.26 +
   16.27 +    curr = direct_list->next;
   16.28 +    while(curr != direct_list){
   16.29 +        node = list_entry(curr, direct_mmap_node_t, list);
   16.30 +        if(node->addr == addr)
   16.31 +            break;
   16.32 +        curr = curr->next;
   16.33 +    } 
   16.34 +
   16.35 +    if(curr == direct_list)
   16.36 +        return 0;
   16.37 +
   16.38 +    return 1;
   16.39 +}
   16.40 +*/
   16.41 +/*
   16.42 +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
   16.43 +{
   16.44 +	struct vm_area_struct *vma;
   16.45 +
   16.46 +	if (len > TASK_SIZE)
   16.47 +		return -ENOMEM;
   16.48 +
   16.49 +	if (addr) {
   16.50 +		addr = PAGE_ALIGN(addr);
   16.51 +		vma = find_vma(current->mm, addr);
   16.52 +		if (TASK_SIZE - len >= addr &&
   16.53 +		    (!vma || addr + len <= vma->vm_start))
   16.54 +			return addr;
   16.55 +	}
   16.56 +	addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
   16.57 +
   16.58 +	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
   16.59 +		if (TASK_SIZE - len < addr)
   16.60 +			return -ENOMEM;
   16.61 +        
   16.62 +		if(current->pid > 100){
   16.63 +		printk(KERN_ALERT "bd240 debug: gua: vm addr found %lx\n", addr);
   16.64 +			printk(KERN_ALERT "bd240 debug: gua: first condition %d, %lx, %lx\n",vma, addr + len, vma->vm_start);
   16.65 +			printk(KERN_ALERT "bd240 debug: gua: second condition %d\n", direct_mapped(addr));
   16.66 +		}
   16.67 +		if ((!vma || addr + len <= vma->vm_start) && !direct_mapped(addr))
   16.68 +			return addr;
   16.69 +		
   16.70 +        addr = vma->vm_end;
   16.71 +	}
   16.72 +}
   16.73 +*/
   16.74 +struct list_head *find_direct(struct list_head *list, unsigned long addr)
   16.75 +{
   16.76 +	struct list_head * curr;
   16.77 +	struct list_head * direct_list = &current->mm->context.direct_list;
   16.78 +	direct_mmap_node_t * node;
   16.79 +
   16.80 +    for ( curr = direct_list->next; curr != direct_list; curr = curr->next )
   16.81 +    {
   16.82 +        node = list_entry(curr, direct_mmap_node_t, list);
   16.83 +        if( node->vm_start >= addr ){
   16.84 +			printk(KERN_ALERT "bd240 debug: find_direct: hit %lx\n", node->vm_start); 
   16.85 +			break;
   16.86 +		}
   16.87 +    }
   16.88 +
   16.89 +    return curr;
   16.90 +}
   16.91 +
   16.92 +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long
   16.93 +addr, unsigned long len, unsigned long pgoff, unsigned long flags)
   16.94 +{
   16.95 +    struct vm_area_struct *vma;
   16.96 +    direct_mmap_node_t * node;
   16.97 +    struct list_head * curr;
   16.98 +    struct list_head * direct_list = &current->mm->context.direct_list;
   16.99 +
  16.100 +    if (len > TASK_SIZE)
  16.101 +        return -ENOMEM;
  16.102 +
  16.103 +    if ( addr )
  16.104 +    {
  16.105 +        addr = PAGE_ALIGN(addr);
  16.106 +        vma = find_vma(current->mm, addr);
  16.107 +        curr = find_direct(direct_list, addr);
  16.108 +        node = list_entry(curr, direct_mmap_node_t, list);
  16.109 +        if ( (TASK_SIZE - len >= addr) &&
  16.110 +             (!vma || addr + len <= vma->vm_start) &&
  16.111 +             ((curr == direct_list) || addr + len <= node->vm_start) )
  16.112 +            return addr;
  16.113 +    }
  16.114 +
  16.115 +    addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
  16.116 +
  16.117 +
  16.118 +    /* Find first VMA and direct_map nodes with vm_start > addr */
  16.119 +    vma  = find_vma(current->mm, addr);
  16.120 +    curr = find_direct(direct_list, addr);
  16.121 +    node = list_entry(curr, direct_mmap_node_t, list);
  16.122 +
  16.123 +    for ( ; ; )
  16.124 +    {
  16.125 +        if ( TASK_SIZE - len < addr ) return -ENOMEM;
  16.126 +
  16.127 +        if ( vma && ((curr == direct_list) || (vma->vm_start < node->vm_start)))
  16.128 +        {
  16.129 +            /* Do we fit before VMA node? */
  16.130 +            if ( addr + len <= vma->vm_start ) return addr;
  16.131 +            addr = vma->vm_end;
  16.132 +            vma = vma->vm_next;
  16.133 +        }
  16.134 +        else if ( curr != direct_list )
  16.135 +        {
  16.136 +            /* Do we fit before direct_map node? */
  16.137 +            if ( addr + len <= node->vm_start) return addr;
  16.138 +            addr = node->vm_end;
  16.139 +            curr = curr->next;
  16.140 +            node = list_entry(curr, direct_mmap_node_t, list);
  16.141 +        }
  16.142 +        else
  16.143 +        {
  16.144 +            /* !vma && curr == direct_list */
  16.145 +            return addr;
  16.146 +        }
  16.147 +    }
  16.148 +}
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c	Wed Jan 15 14:21:52 2003 +0000
    17.3 @@ -0,0 +1,26 @@
    17.4 +
    17.5 +#include <linux/slab.h>
    17.6 +#include <linux/list.h>
    17.7 +
    17.8 +int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
    17.9 +{
   17.10 +    INIT_LIST_HEAD(&mm->context.direct_list);
   17.11 +    return 0;
   17.12 +}
   17.13 +
   17.14 +/* just free all elements of list identifying directly mapped areas */
   17.15 +void destroy_context(struct mm_struct *mm)
   17.16 +{
   17.17 +    direct_mmap_node_t * node;
   17.18 +    struct list_head * curr;
   17.19 +    struct list_head * direct_list = &mm->context.direct_list;
   17.20 +
   17.21 +    curr = direct_list->next;
   17.22 +    while(curr != direct_list){
   17.23 +        node = list_entry(curr, direct_mmap_node_t, list);
   17.24 +        curr = curr->next;
   17.25 +        list_del(&node->list);
   17.26 +        kfree(node);
   17.27 +   }
   17.28 +
   17.29 +}
    18.1 --- a/xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h	Wed Jan 15 00:21:24 2003 +0000
    18.2 +++ b/xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h	Wed Jan 15 14:21:52 2003 +0000
    18.3 @@ -1,6 +1,16 @@
    18.4 +
    18.5  #ifndef __i386_MMU_H
    18.6  #define __i386_MMU_H
    18.7  
    18.8 +#include <linux/list.h>
    18.9 +
   18.10 +/* describes directly mapped vma nodes */
   18.11 +typedef struct {
   18.12 +    struct list_head list;
   18.13 +    unsigned long vm_start;
   18.14 +    unsigned long vm_end;
   18.15 +} direct_mmap_node_t;
   18.16 +
   18.17  /*
   18.18   * The i386 doesn't have a mmu context, but
   18.19   * we put the segment information here.
   18.20 @@ -8,6 +18,7 @@
   18.21  typedef struct { 
   18.22  	void *segments;
   18.23  	unsigned long cpuvalid;
   18.24 +    struct list_head direct_list;
   18.25  } mm_context_t;
   18.26  
   18.27  #endif
    19.1 --- a/xenolinux-2.4.16-sparse/include/asm-xeno/mmu_context.h	Wed Jan 15 00:21:24 2003 +0000
    19.2 +++ b/xenolinux-2.4.16-sparse/include/asm-xeno/mmu_context.h	Wed Jan 15 14:21:52 2003 +0000
    19.3 @@ -1,3 +1,4 @@
    19.4 +
    19.5  #ifndef __I386_MMU_CONTEXT_H
    19.6  #define __I386_MMU_CONTEXT_H
    19.7  
    19.8 @@ -9,8 +10,12 @@
    19.9  /*
   19.10   * possibly do the LDT unload here?
   19.11   */
   19.12 -#define destroy_context(mm)		do { } while(0)
   19.13 -#define init_new_context(tsk,mm)	0
   19.14 +
   19.15 +extern int init_new_context(struct task_struct *tsk, struct mm_struct *);
   19.16 +extern void destroy_context(struct mm_struct *);
   19.17 +
   19.18 +//#define destroy_context(mm)		do { } while(0)
   19.19 +//#define init_new_context(tsk,mm)	0
   19.20  
   19.21  #ifdef CONFIG_SMP
   19.22  
    20.1 --- a/xenolinux-2.4.16-sparse/include/asm-xeno/pgtable.h	Wed Jan 15 00:21:24 2003 +0000
    20.2 +++ b/xenolinux-2.4.16-sparse/include/asm-xeno/pgtable.h	Wed Jan 15 14:21:52 2003 +0000
    20.3 @@ -3,6 +3,8 @@
    20.4  
    20.5  #include <linux/config.h>
    20.6  
    20.7 +#define HAVE_ARCH_UNMAPPED_AREA
    20.8 +
    20.9  /*
   20.10   * The Linux memory management assumes a three-level page table setup. On
   20.11   * the i386, we use that, but "fold" the mid level into the top-level page