ia64/xen-unstable

changeset 18134:1970781956c7

merge with xen-unstable.hg
author Isaku Yamahata <yamahata@valinux.co.jp>
date Wed Jul 23 12:10:20 2008 +0900 (2008-07-23)
parents 1e7a371cee11 f86941c1b523
children 01a3bba6b96d
line diff
     1.1 --- a/.hgignore	Wed Jul 23 11:21:47 2008 +0900
     1.2 +++ b/.hgignore	Wed Jul 23 12:10:20 2008 +0900
     1.3 @@ -138,6 +138,7 @@
     1.4  ^tools/firmware/vgabios/vbetables-gen$
     1.5  ^tools/firmware/vgabios/vbetables\.h$
     1.6  ^tools/flask/loadpolicy/flask-loadpolicy$
     1.7 +^tools/fs-back/fs-backend$
     1.8  ^tools/include/xen/.*$
     1.9  ^tools/include/xen-foreign/.*\.(c|h|size)$
    1.10  ^tools/include/xen-foreign/checker$
     2.1 --- a/extras/mini-os/arch/ia64/mm.c	Wed Jul 23 11:21:47 2008 +0900
     2.2 +++ b/extras/mini-os/arch/ia64/mm.c	Wed Jul 23 12:10:20 2008 +0900
     2.3 @@ -130,6 +130,11 @@ arch_init_demand_mapping_area(unsigned l
     2.4  	max_pfn = max_pfn;
     2.5  }
     2.6  
     2.7 +unsigned long allocate_ondemand(unsigned long n, unsigned long alignment)
     2.8 +{
     2.9 +        return 0;
    2.10 +}
    2.11 +
    2.12  /* Helper function used in gnttab.c. */
    2.13  void do_map_frames(unsigned long addr,
    2.14          unsigned long *f, unsigned long n, unsigned long stride,
     3.1 --- a/extras/mini-os/arch/ia64/sched.c	Wed Jul 23 11:21:47 2008 +0900
     3.2 +++ b/extras/mini-os/arch/ia64/sched.c	Wed Jul 23 12:10:20 2008 +0900
     3.3 @@ -34,6 +34,11 @@
     3.4  /* The function is implemented in fw.S */
     3.5  extern void thread_starter(void);
     3.6  
     3.7 +void stack_walk(void)
     3.8 +{
     3.9 +    /* TODO */
    3.10 +}
    3.11 +
    3.12  struct thread*
    3.13  arch_create_thread(char *name, void (*function)(void *), void *data)
    3.14  {
     4.1 --- a/extras/mini-os/arch/x86/mm.c	Wed Jul 23 11:21:47 2008 +0900
     4.2 +++ b/extras/mini-os/arch/x86/mm.c	Wed Jul 23 12:10:20 2008 +0900
     4.3 @@ -492,9 +492,7 @@ void do_map_frames(unsigned long addr,
     4.4      }
     4.5  }
     4.6  
     4.7 -void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
     4.8 -	unsigned long increment, unsigned long alignment, domid_t id,
     4.9 -	int may_fail, unsigned long prot)
    4.10 +unsigned long allocate_ondemand(unsigned long n, unsigned long alignment)
    4.11  {
    4.12      unsigned long x;
    4.13      unsigned long y = 0;
    4.14 @@ -517,13 +515,24 @@ void *map_frames_ex(unsigned long *f, un
    4.15      }
    4.16      if (y != n) {
    4.17          printk("Failed to find %ld frames!\n", n);
    4.18 +        return 0;
    4.19 +    }
    4.20 +    return demand_map_area_start + x * PAGE_SIZE;
    4.21 +}
    4.22 +
    4.23 +void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
    4.24 +	unsigned long increment, unsigned long alignment, domid_t id,
    4.25 +	int may_fail, unsigned long prot)
    4.26 +{
    4.27 +    unsigned long addr = allocate_ondemand(n, alignment);
    4.28 +
    4.29 +    if (!addr)
    4.30          return NULL;
    4.31 -    }
    4.32  
    4.33      /* Found it at x.  Map it in. */
    4.34 -    do_map_frames(demand_map_area_start + x * PAGE_SIZE, f, n, stride, increment, id, may_fail, prot);
    4.35 +    do_map_frames(addr, f, n, stride, increment, id, may_fail, prot);
    4.36  
    4.37 -    return (void *)(unsigned long)(demand_map_area_start + x * PAGE_SIZE);
    4.38 +    return (void *)addr;
    4.39  }
    4.40  
    4.41  static void clear_bootstrap(void)
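
For reference, the hunk above splits map_frames_ex() into two steps: allocate_ondemand() only reserves a free range in the demand-mapping area, and do_map_frames() then installs the actual frame mappings. A minimal caller sketch (illustrative only; the helper name map_my_frames and its arguments are assumptions, not part of this changeset, and it relies on mini-os's DOMID_SELF and L1_PROT definitions):

    /* Sketch: reserve n pages of demand-mapped virtual space, then map the
     * machine frames in mfns[0..n-1] into it one after another. */
    static void *map_my_frames(unsigned long *mfns, unsigned long n)
    {
        unsigned long addr = allocate_ondemand(n, 1);   /* page alignment */
        if (!addr)
            return NULL;                                /* no free range  */
        /* stride 1, increment 0: use mfns[i] for page i; may_fail = 0 */
        do_map_frames(addr, mfns, n, 1, 0, DOMID_SELF, 0, L1_PROT);
        return (void *)addr;
    }

This mirrors what the reworked map_frames_ex() in the hunk does internally.
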
     5.1 --- a/extras/mini-os/arch/x86/traps.c	Wed Jul 23 11:21:47 2008 +0900
     5.2 +++ b/extras/mini-os/arch/x86/traps.c	Wed Jul 23 12:10:20 2008 +0900
     5.3 @@ -112,7 +112,7 @@ void page_walk(unsigned long virt_addres
     5.4          printk("   L2 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l2_table_offset(addr));
     5.5          
     5.6          page = tab[l1_table_offset(addr)];
     5.7 -        printk("    L1 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l1_table_offset(addr));
     5.8 +        printk("    L1 = %"PRIpte" [offset = %lx]\n", page, l1_table_offset(addr));
     5.9  
    5.10  }
    5.11  
    5.12 @@ -155,6 +155,40 @@ static int handle_cow(unsigned long addr
    5.13  	return 0;
    5.14  }
    5.15  
    5.16 +static void do_stack_walk(unsigned long frame_base)
    5.17 +{
    5.18 +    unsigned long *frame = (void*) frame_base;
    5.19 +    printk("base is %#lx ", frame_base);
    5.20 +    printk("caller is %#lx\n", frame[1]);
    5.21 +    if (frame[0])
    5.22 +	do_stack_walk(frame[0]);
    5.23 +}
    5.24 +
    5.25 +void stack_walk(void)
    5.26 +{
    5.27 +    unsigned long bp;
    5.28 +#ifdef __x86_64__
    5.29 +    asm("movq %%rbp, %0":"=r"(bp));
    5.30 +#else
    5.31 +    asm("movl %%ebp, %0":"=r"(bp));
    5.32 +#endif
    5.33 +    do_stack_walk(bp);
    5.34 +}
    5.35 +
    5.36 +static void dump_mem(unsigned long addr)
    5.37 +{
    5.38 +    unsigned long i;
    5.39 +    if (addr < PAGE_SIZE)
    5.40 +	return;
    5.41 +
    5.42 +    for (i = ((addr)-16 ) & ~15; i < (((addr)+48 ) & ~15); i++)
    5.43 +    {
    5.44 +	if (!(i%16))
    5.45 +	    printk("\n%lx:", i);
    5.46 +	printk(" %02x", *(unsigned char *)i);
    5.47 +    }
    5.48 +    printk("\n");
    5.49 +}
    5.50  #define read_cr2() \
    5.51          (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
    5.52  
    5.53 @@ -163,6 +197,7 @@ static int handling_pg_fault = 0;
    5.54  void do_page_fault(struct pt_regs *regs, unsigned long error_code)
    5.55  {
    5.56      unsigned long addr = read_cr2();
    5.57 +    struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_crash };
    5.58  
    5.59      if ((error_code & TRAP_PF_WRITE) && handle_cow(addr))
    5.60  	return;
    5.61 @@ -170,37 +205,61 @@ void do_page_fault(struct pt_regs *regs,
    5.62      /* If we are already handling a page fault, and got another one
    5.63         that means we faulted in pagetable walk. Continuing here would cause
    5.64         a recursive fault */       
    5.65 -    if(handling_pg_fault) 
    5.66 +    if(handling_pg_fault == 1) 
    5.67      {
    5.68          printk("Page fault in pagetable walk (access to invalid memory?).\n"); 
    5.69 -        do_exit();
    5.70 +        HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
    5.71      }
    5.72 -    handling_pg_fault = 1;
    5.73 +    handling_pg_fault++;
    5.74 +    barrier();
    5.75  
    5.76  #if defined(__x86_64__)
    5.77 -    printk("Page fault at linear address %p, rip %p, code %lx\n",
    5.78 -           addr, regs->rip, error_code);
    5.79 +    printk("Page fault at linear address %p, rip %p, regs %p, sp %p, our_sp %p, code %lx\n",
    5.80 +           addr, regs->rip, regs, regs->rsp, &addr, error_code);
    5.81  #else
    5.82 -    printk("Page fault at linear address %p, eip %p, code %lx\n",
    5.83 -           addr, regs->eip, error_code);
    5.84 +    printk("Page fault at linear address %p, eip %p, regs %p, sp %p, our_sp %p, code %lx\n",
    5.85 +           addr, regs->eip, regs, regs->esp, &addr, error_code);
    5.86  #endif
    5.87  
    5.88      dump_regs(regs);
    5.89 +#if defined(__x86_64__)
    5.90 +    do_stack_walk(regs->rbp);
    5.91 +    dump_mem(regs->rsp);
    5.92 +    dump_mem(regs->rbp);
    5.93 +    dump_mem(regs->rip);
    5.94 +#else
    5.95 +    do_stack_walk(regs->ebp);
    5.96 +    dump_mem(regs->esp);
    5.97 +    dump_mem(regs->ebp);
    5.98 +    dump_mem(regs->eip);
    5.99 +#endif
   5.100      page_walk(addr);
   5.101 -    do_exit();
   5.102 +    HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
   5.103      /* We should never get here ... but still */
   5.104 -    handling_pg_fault = 0;
   5.105 +    handling_pg_fault--;
   5.106  }
   5.107  
   5.108  void do_general_protection(struct pt_regs *regs, long error_code)
   5.109  {
   5.110 +    struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_crash };
   5.111  #ifdef __i386__
   5.112      printk("GPF eip: %p, error_code=%lx\n", regs->eip, error_code);
   5.113  #else    
   5.114      printk("GPF rip: %p, error_code=%lx\n", regs->rip, error_code);
   5.115  #endif
   5.116      dump_regs(regs);
   5.117 -    do_exit();
   5.118 +#if defined(__x86_64__)
   5.119 +    do_stack_walk(regs->rbp);
   5.120 +    dump_mem(regs->rsp);
   5.121 +    dump_mem(regs->rbp);
   5.122 +    dump_mem(regs->rip);
   5.123 +#else
   5.124 +    do_stack_walk(regs->ebp);
   5.125 +    dump_mem(regs->esp);
   5.126 +    dump_mem(regs->ebp);
   5.127 +    dump_mem(regs->eip);
   5.128 +#endif
   5.129 +    HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
   5.130  }
   5.131  
   5.132  
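
The new stack_walk()/do_stack_walk() pair simply follows the conventional x86 frame-pointer chain, so it only produces useful output when mini-os is built with frame pointers (i.e. not with -fomit-frame-pointer). A sketch of the frame layout the walker assumes (illustrative only, not part of this changeset):

    /* At each frame base (ebp/rbp) the walker expects:
     *   frame[0] - the caller's saved frame pointer (next link in the chain)
     *   frame[1] - the return address into the caller
     * which corresponds to this layout: */
    struct stack_frame {
        struct stack_frame *caller;      /* frame[0] */
        unsigned long       return_addr; /* frame[1] */
    };

dump_mem() complements it by hex-dumping roughly 16 bytes before and 48 bytes after a given address (the stack pointer, frame pointer and instruction pointer in the fault handlers above), skipping addresses in the first page so it does not fault on NULL-like pointers.
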
     6.1 --- a/extras/mini-os/fs-front.c	Wed Jul 23 11:21:47 2008 +0900
     6.2 +++ b/extras/mini-os/fs-front.c	Wed Jul 23 12:10:20 2008 +0900
     6.3 @@ -183,12 +183,13 @@ int fs_open(struct fs_import *import, ch
     6.4  
     6.5      /* Prepare request for the backend */
     6.6      back_req_id = reserve_fsif_request(import);
     6.7 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
     6.8 +    DEBUG("Backend request id=%d\n", back_req_id);
     6.9  
    6.10      /* Prepare our private request structure */
    6.11      priv_req_id = get_id_from_freelist(import->freelist);
    6.12      DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
    6.13      fsr = &import->requests[priv_req_id];
    6.14 +    DEBUG("gref id=%d\n", fsr->gref);
    6.15      fsr->thread = current;
    6.16      sprintf(fsr->page, "%s", file);
    6.17  
    6.18 @@ -221,7 +222,7 @@ int fs_close(struct fs_import *import, i
    6.19  
    6.20      /* Prepare request for the backend */
    6.21      back_req_id = reserve_fsif_request(import);
    6.22 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
    6.23 +    DEBUG("Backend request id=%d\n", back_req_id);
    6.24  
    6.25      /* Prepare our private request structure */
    6.26      priv_req_id = get_id_from_freelist(import->freelist);
    6.27 @@ -261,12 +262,13 @@ ssize_t fs_read(struct fs_import *import
    6.28  
    6.29      /* Prepare request for the backend */
    6.30      back_req_id = reserve_fsif_request(import);
    6.31 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
    6.32 +    DEBUG("Backend request id=%d\n", back_req_id);
    6.33  
    6.34      /* Prepare our private request structure */
    6.35      priv_req_id = get_id_from_freelist(import->freelist);
    6.36      DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
    6.37      fsr = &import->requests[priv_req_id];
    6.38 +    DEBUG("gref=%d\n", fsr->gref);
    6.39      fsr->thread = current;
    6.40      memset(fsr->page, 0, PAGE_SIZE);
    6.41  
    6.42 @@ -307,12 +309,13 @@ ssize_t fs_write(struct fs_import *impor
    6.43  
    6.44      /* Prepare request for the backend */
    6.45      back_req_id = reserve_fsif_request(import);
    6.46 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
    6.47 +    DEBUG("Backend request id=%d\n", back_req_id);
    6.48  
    6.49      /* Prepare our private request structure */
    6.50      priv_req_id = get_id_from_freelist(import->freelist);
    6.51      DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
    6.52      fsr = &import->requests[priv_req_id];
    6.53 +    DEBUG("gref=%d\n", fsr->gref);
    6.54      fsr->thread = current;
    6.55      memcpy(fsr->page, buf, len);
    6.56      BUG_ON(len > PAGE_SIZE);
    6.57 @@ -352,12 +355,13 @@ int fs_stat(struct fs_import *import,
    6.58  
    6.59      /* Prepare request for the backend */
    6.60      back_req_id = reserve_fsif_request(import);
    6.61 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
    6.62 +    DEBUG("Backend request id=%d\n", back_req_id);
    6.63  
    6.64      /* Prepare our private request structure */
    6.65      priv_req_id = get_id_from_freelist(import->freelist);
    6.66      DEBUG("Request id for fs_stat call is: %d\n", priv_req_id);
    6.67      fsr = &import->requests[priv_req_id];
    6.68 +    DEBUG("gref=%d\n", fsr->gref);
    6.69      fsr->thread = current;
    6.70      memset(fsr->page, 0, PAGE_SIZE);
    6.71  
    6.72 @@ -394,7 +398,7 @@ int fs_truncate(struct fs_import *import
    6.73  
    6.74      /* Prepare request for the backend */
    6.75      back_req_id = reserve_fsif_request(import);
    6.76 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
    6.77 +    DEBUG("Backend request id=%d\n", back_req_id);
    6.78  
    6.79      /* Prepare our private request structure */
    6.80      priv_req_id = get_id_from_freelist(import->freelist);
    6.81 @@ -432,12 +436,13 @@ int fs_remove(struct fs_import *import, 
    6.82  
    6.83      /* Prepare request for the backend */
    6.84      back_req_id = reserve_fsif_request(import);
    6.85 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
    6.86 +    DEBUG("Backend request id=%d\n", back_req_id);
    6.87  
    6.88      /* Prepare our private request structure */
    6.89      priv_req_id = get_id_from_freelist(import->freelist);
    6.90      DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
    6.91      fsr = &import->requests[priv_req_id];
    6.92 +    DEBUG("gref=%d\n", fsr->gref);
    6.93      fsr->thread = current;
    6.94      sprintf(fsr->page, "%s", file);
    6.95  
    6.96 @@ -475,12 +480,13 @@ int fs_rename(struct fs_import *import,
    6.97  
    6.98      /* Prepare request for the backend */
    6.99      back_req_id = reserve_fsif_request(import);
   6.100 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
   6.101 +    DEBUG("Backend request id=%d\n", back_req_id);
   6.102  
   6.103      /* Prepare our private request structure */
   6.104      priv_req_id = get_id_from_freelist(import->freelist);
   6.105      DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
   6.106      fsr = &import->requests[priv_req_id];
   6.107 +    DEBUG("gref=%d\n", fsr->gref);
   6.108      fsr->thread = current;
   6.109      sprintf(fsr->page, "%s%s%c%s%s", 
   6.110              old_header, old_file_name, '\0', new_header, new_file_name);
   6.111 @@ -521,12 +527,13 @@ int fs_create(struct fs_import *import, 
   6.112  
   6.113      /* Prepare request for the backend */
   6.114      back_req_id = reserve_fsif_request(import);
   6.115 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
   6.116 +    DEBUG("Backend request id=%d\n", back_req_id);
   6.117  
   6.118      /* Prepare our private request structure */
   6.119      priv_req_id = get_id_from_freelist(import->freelist);
   6.120      DEBUG("Request id for fs_create call is: %d\n", priv_req_id);
   6.121      fsr = &import->requests[priv_req_id];
   6.122 +    DEBUG("gref=%d\n", fsr->gref);
   6.123      fsr->thread = current;
   6.124      sprintf(fsr->page, "%s", name);
   6.125  
   6.126 @@ -566,12 +573,13 @@ char** fs_list(struct fs_import *import,
   6.127  
   6.128      /* Prepare request for the backend */
   6.129      back_req_id = reserve_fsif_request(import);
   6.130 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
   6.131 +    DEBUG("Backend request id=%d\n", back_req_id);
   6.132  
   6.133      /* Prepare our private request structure */
   6.134      priv_req_id = get_id_from_freelist(import->freelist);
   6.135      DEBUG("Request id for fs_list call is: %d\n", priv_req_id);
   6.136      fsr = &import->requests[priv_req_id];
   6.137 +    DEBUG("gref=%d\n", fsr->gref);
   6.138      fsr->thread = current;
   6.139      sprintf(fsr->page, "%s", name);
   6.140  
   6.141 @@ -615,7 +623,7 @@ int fs_chmod(struct fs_import *import, i
   6.142  
   6.143      /* Prepare request for the backend */
   6.144      back_req_id = reserve_fsif_request(import);
   6.145 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
   6.146 +    DEBUG("Backend request id=%d\n", back_req_id);
   6.147  
   6.148      /* Prepare our private request structure */
   6.149      priv_req_id = get_id_from_freelist(import->freelist);
   6.150 @@ -653,12 +661,13 @@ int64_t fs_space(struct fs_import *impor
   6.151  
   6.152      /* Prepare request for the backend */
   6.153      back_req_id = reserve_fsif_request(import);
   6.154 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
   6.155 +    DEBUG("Backend request id=%d\n", back_req_id);
   6.156  
   6.157      /* Prepare our private request structure */
   6.158      priv_req_id = get_id_from_freelist(import->freelist);
   6.159      DEBUG("Request id for fs_space is: %d\n", priv_req_id);
   6.160      fsr = &import->requests[priv_req_id];
   6.161 +    DEBUG("gref=%d\n", fsr->gref);
   6.162      fsr->thread = current;
   6.163      sprintf(fsr->page, "%s", location);
   6.164  
   6.165 @@ -691,7 +700,7 @@ int fs_sync(struct fs_import *import, in
   6.166  
   6.167      /* Prepare request for the backend */
   6.168      back_req_id = reserve_fsif_request(import);
   6.169 -    DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
   6.170 +    DEBUG("Backend request id=%d\n", back_req_id);
   6.171  
   6.172      /* Prepare our private request structure */
   6.173      priv_req_id = get_id_from_freelist(import->freelist);
   6.174 @@ -737,7 +746,7 @@ static void fsfront_handler(evtchn_port_
   6.175  
   6.176      DEBUG("Event from import [%d:%d].\n", import->dom_id, import->export_id);
   6.177  moretodo:   
   6.178 -    rp = import->ring.sring->req_prod;
   6.179 +    rp = import->ring.sring->rsp_prod;
   6.180      rmb(); /* Ensure we see queued responses up to 'rp'. */
   6.181      cons = import->ring.rsp_cons;
   6.182      while (cons != rp)
   6.183 @@ -747,7 +756,7 @@ moretodo:
   6.184  
   6.185          rsp = RING_GET_RESPONSE(&import->ring, cons); 
   6.186          DEBUG("Response at idx=%d to request id=%d, ret_val=%lx\n", 
   6.187 -            import->ring.rsp_cons, rsp->id, rsp->ret_val);
   6.188 +            cons, rsp->id, rsp->ret_val);
   6.189          req = &import->requests[rsp->id];
   6.190          memcpy(&req->shadow_rsp, rsp, sizeof(struct fsif_response));
   6.191          DEBUG("Waking up: %s\n", req->thread->name);
     7.1 --- a/extras/mini-os/include/ia64/traps.h	Wed Jul 23 11:21:47 2008 +0900
     7.2 +++ b/extras/mini-os/include/ia64/traps.h	Wed Jul 23 12:10:20 2008 +0900
     7.3 @@ -48,5 +48,7 @@ inline static void trap_fini(void)
     7.4  
     7.5  #include "ia64_cpu.h"
     7.6  
     7.7 +void stack_walk(void);
     7.8 +
     7.9  #endif /* !defined(_TRAPS_H_) */
    7.10  
     8.1 --- a/extras/mini-os/include/list.h	Wed Jul 23 11:21:47 2008 +0900
     8.2 +++ b/extras/mini-os/include/list.h	Wed Jul 23 12:10:20 2008 +0900
     8.3 @@ -24,6 +24,12 @@ struct list_head {
     8.4  	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
     8.5  } while (0)
     8.6  
     8.7 +#define list_top(head, type, member)					  \
     8.8 +({ 									  \
     8.9 +	struct list_head *_head = (head);				  \
    8.10 +	list_empty(_head) ? NULL : list_entry(_head->next, type, member); \
    8.11 +})
    8.12 +
    8.13  /*
    8.14   * Insert a new entry between two known consecutive entries. 
    8.15   *
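
The new list_top() macro returns the first entry of a list, or NULL when the list is empty, without removing it. A minimal usage sketch (illustrative only; struct request, the queue name and the LIST_HEAD initializer used here are assumptions, not part of this changeset):

    struct request {
        int id;
        struct list_head queue;            /* linkage into the pending list */
    };

    static LIST_HEAD(pending);             /* assumed list-head initializer */

    static struct request *peek_pending(void)
    {
        /* NULL if nothing is queued, otherwise the oldest pending request. */
        return list_top(&pending, struct request, queue);
    }
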
     9.1 --- a/extras/mini-os/include/mm.h	Wed Jul 23 11:21:47 2008 +0900
     9.2 +++ b/extras/mini-os/include/mm.h	Wed Jul 23 12:10:20 2008 +0900
     9.3 @@ -63,6 +63,7 @@ void arch_init_demand_mapping_area(unsig
     9.4  void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p);
     9.5  void arch_init_p2m(unsigned long max_pfn_p);
     9.6  
     9.7 +unsigned long allocate_ondemand(unsigned long n, unsigned long alignment);
     9.8  /* map f[i*stride]+i*increment for i in 0..n-1, aligned on alignment pages */
     9.9  void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
    9.10  	unsigned long increment, unsigned long alignment, domid_t id,
    10.1 --- a/extras/mini-os/include/x86/traps.h	Wed Jul 23 11:21:47 2008 +0900
    10.2 +++ b/extras/mini-os/include/x86/traps.h	Wed Jul 23 12:10:20 2008 +0900
    10.3 @@ -69,6 +69,7 @@ struct pt_regs {
    10.4  #endif
    10.5  
    10.6  void dump_regs(struct pt_regs *regs);
    10.7 +void stack_walk(void);
    10.8  
    10.9  #define TRAP_PF_PROT   0x1
   10.10  #define TRAP_PF_WRITE  0x2
    11.1 --- a/extras/mini-os/kernel.c	Wed Jul 23 11:21:47 2008 +0900
    11.2 +++ b/extras/mini-os/kernel.c	Wed Jul 23 12:10:20 2008 +0900
    11.3 @@ -592,6 +592,7 @@ void stop_kernel(void)
    11.4  void do_exit(void)
    11.5  {
    11.6      printk("Do_exit called!\n");
    11.7 +    stack_walk();
    11.8      for( ;; )
    11.9      {
   11.10          struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_crash };
    12.1 --- a/stubdom/Makefile	Wed Jul 23 11:21:47 2008 +0900
    12.2 +++ b/stubdom/Makefile	Wed Jul 23 12:10:20 2008 +0900
    12.3 @@ -85,7 +85,7 @@ newlib-$(NEWLIB_VERSION): newlib-$(NEWLI
    12.4  NEWLIB_STAMPFILE=$(CROSS_ROOT)/$(GNU_TARGET_ARCH)-xen-elf/lib/libc.a
    12.5  .PHONY: cross-newlib
    12.6  cross-newlib: $(NEWLIB_STAMPFILE)
    12.7 -$(NEWLIB_STAMPFILE): newlib-$(NEWLIB_VERSION)
    12.8 +$(NEWLIB_STAMPFILE): mk-headers newlib-$(NEWLIB_VERSION)
    12.9  	mkdir -p newlib-build
   12.10  	( cd newlib-build && \
   12.11  	  CC_FOR_TARGET="$(CC) $(TARGET_CPPFLAGS) $(TARGET_CFLAGS) $(NEWLIB_CFLAGS)" AR_FOR_TARGET=$(AR) LD_FOR_TARGET=$(LD) RANLIB_FOR_TARGET=$(RANLIB) ../newlib-$(NEWLIB_VERSION)/configure --prefix=$(CROSS_PREFIX) --verbose --target=$(GNU_TARGET_ARCH)-xen-elf --enable-newlib-io-long-long --disable-multilib && \
   12.12 @@ -205,7 +205,7 @@ TARGETS_MINIOS=$(addprefix mini-os-,$(TA
   12.13  
   12.14  .PHONY: libxc
   12.15  libxc: libxc/libxenctrl.a libxc/libxenguest.a
   12.16 -libxc/libxenctrl.a libxc/libxenguest.a:: cross-zlib mk-headers
   12.17 +libxc/libxenctrl.a libxc/libxenguest.a:: cross-zlib
   12.18  	CPPFLAGS="$(TARGET_CPPFLAGS)" CFLAGS="$(TARGET_CFLAGS)" $(MAKE) -C libxc
   12.19  
   12.20  #######
   12.21 @@ -213,7 +213,7 @@ libxc/libxenctrl.a libxc/libxenguest.a::
   12.22  #######
   12.23  
   12.24  .PHONY: ioemu
   12.25 -ioemu: cross-zlib cross-libpci mk-headers libxc
   12.26 +ioemu: cross-zlib cross-libpci libxc
   12.27  ifeq ($(CONFIG_QEMU),ioemu)
   12.28  	[ -f ioemu/config-host.mak ] || \
   12.29  	  ( cd ioemu ; \
   12.30 @@ -231,7 +231,7 @@ endif
   12.31  ######
   12.32  
   12.33  .PHONY: caml
   12.34 -caml: cross-newlib mk-headers
   12.35 +caml: $(CROSS_ROOT)
   12.36  	CPPFLAGS="$(TARGET_CPPFLAGS)" CFLAGS="$(TARGET_CFLAGS)" $(MAKE) -C $@ LWIPDIR=$(CURDIR)/lwip 
   12.37  
   12.38  ###
   12.39 @@ -239,7 +239,7 @@ caml: cross-newlib mk-headers
   12.40  ###
   12.41  
   12.42  .PHONY: c
   12.43 -c: cross-newlib mk-headers
   12.44 +c: $(CROSS_ROOT)
   12.45  	CPPFLAGS="$(TARGET_CPPFLAGS)" CFLAGS="$(TARGET_CFLAGS)" $(MAKE) -C $@ LWIPDIR=$(CURDIR)/lwip 
   12.46  
   12.47  ######
   12.48 @@ -257,7 +257,7 @@ grub-upstream: grub-$(GRUB_VERSION).tar.
   12.49  	done
   12.50  
   12.51  .PHONY: grub
   12.52 -grub: grub-upstream cross-newlib mk-headers
   12.53 +grub: grub-upstream $(CROSS_ROOT)
   12.54  	CPPFLAGS="$(TARGET_CPPFLAGS)" CFLAGS="$(TARGET_CFLAGS)" $(MAKE) -C $@
   12.55  
   12.56  ########
    13.1 --- a/stubdom/README	Wed Jul 23 11:21:47 2008 +0900
    13.2 +++ b/stubdom/README	Wed Jul 23 12:10:20 2008 +0900
    13.3 @@ -87,8 +87,8 @@ and any other parameter as wished.
    13.4  To run
    13.5  ======
    13.6  
    13.7 -mkdir -p /exports/usr/share/qemu
    13.8 -ln -s /usr/share/qemu/keymaps /exports/usr/share/qemu
    13.9 +mkdir -p /exports/usr/share/xen/qemu
   13.10 +ln -s /usr/share/xen/qemu/keymaps /exports/usr/share/xen/qemu
   13.11  mkdir -p /exports/var/lib
   13.12  ln -s /var/lib/xen /exports/var/lib
   13.13  /usr/sbin/fs-backend &
    14.1 --- a/tools/Makefile	Wed Jul 23 11:21:47 2008 +0900
    14.2 +++ b/tools/Makefile	Wed Jul 23 12:10:20 2008 +0900
    14.3 @@ -22,6 +22,7 @@ SUBDIRS-y += libaio
    14.4  SUBDIRS-y += blktap
    14.5  SUBDIRS-y += libfsimage
    14.6  SUBDIRS-$(LIBXENAPI_BINDINGS) += libxen
    14.7 +SUBDIRS-y += fs-back
    14.8  
    14.9  ifeq (ioemu,$(CONFIG_QEMU))
   14.10  SUBDIRS-$(CONFIG_IOEMU) += ioemu
    15.1 --- a/tools/examples/Makefile	Wed Jul 23 11:21:47 2008 +0900
    15.2 +++ b/tools/examples/Makefile	Wed Jul 23 12:10:20 2008 +0900
    15.3 @@ -28,6 +28,7 @@ XEN_SCRIPTS += blktap
    15.4  XEN_SCRIPTS += vtpm vtpm-delete
    15.5  XEN_SCRIPTS += xen-hotplug-cleanup
    15.6  XEN_SCRIPTS += external-device-migrate
    15.7 +XEN_SCRIPTS += vscsi
    15.8  XEN_SCRIPT_DATA = xen-script-common.sh locking.sh logging.sh
    15.9  XEN_SCRIPT_DATA += xen-hotplug-common.sh xen-network-common.sh vif-common.sh
   15.10  XEN_SCRIPT_DATA += block-common.sh vtpm-common.sh vtpm-hotplug-common.sh
    16.1 --- a/tools/examples/blktap	Wed Jul 23 11:21:47 2008 +0900
    16.2 +++ b/tools/examples/blktap	Wed Jul 23 12:10:20 2008 +0900
    16.3 @@ -69,7 +69,6 @@ fi
    16.4  if [ -L "$p" ]; then
    16.5      file=$(readlink -f "$p") || fatal "$p link does not exist."
    16.6  else
    16.7 -    [ -f "$p" ] || { fatal "$p file does not exist."; }
    16.8      file="$p"
    16.9  fi
   16.10  
    17.1 --- a/tools/examples/block	Wed Jul 23 11:21:47 2008 +0900
    17.2 +++ b/tools/examples/block	Wed Jul 23 12:10:20 2008 +0900
    17.3 @@ -209,6 +209,14 @@ case "$command" in
    17.4          FRONTEND_ID=$(xenstore_read "$XENBUS_PATH/frontend-id")
    17.5          FRONTEND_UUID=$(xenstore_read_default \
    17.6              "/local/domain/$FRONTEND_ID/vm" 'unknown')
    17.7 +
    17.8 +        if [ -L "$dev" ]
    17.9 +        then
   17.10 +          dev=$(readlink -f "$dev") || fatal "$dev link does not exist."
   17.11 +        fi
   17.12 +        test -e "$dev" || fatal "$dev does not exist."
   17.13 +        test -b "$dev" || fatal "$dev is not a block device."
   17.14 +
   17.15          claim_lock "block"
   17.16          check_device_sharing "$dev" "$mode"
   17.17  	write_dev "$dev"
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/tools/examples/stubdom-ExampleHVMDomain	Wed Jul 23 12:10:20 2008 +0900
    18.3 @@ -0,0 +1,14 @@
    18.4 +# Not to be started directly,
    18.5 +# See xmexample.hvm-stubdom and stubdom/README for more details
    18.6 +
    18.7 +kernel = "/usr/lib/xen/boot/ioemu-stubdom.gz"
    18.8 +
    18.9 +# Must be the same as in xmexample.hvm-stubdom, with a prepended vif for TCP/IP
   18.10 +# networking in the stubdomain itself, here just ''
   18.11 +vif = [ '', 'type=ioemu, bridge=xenbr0' ]
   18.12 +
   18.13 +# Set here instead of in xmexample.hvm-stubdom
   18.14 +disk = [ 'file:/var/images/min-el3-i386.img,hda,w', ',hdc:cdrom,r' ]
   18.15 +
   18.16 +# Actual output via PVFB
   18.17 +vfb = [ 'type=sdl' ]
    19.1 --- a/tools/examples/xmexample.hvm	Wed Jul 23 11:21:47 2008 +0900
    19.2 +++ b/tools/examples/xmexample.hvm	Wed Jul 23 12:10:20 2008 +0900
    19.3 @@ -119,7 +119,7 @@ disk = [ 'file:/var/images/min-el3-i386.
    19.4  
    19.5  #============================================================================
    19.6  
    19.7 -# New stuff
    19.8 +# Device Model to be used
    19.9  device_model = '/usr/' + arch_libdir + '/xen/bin/qemu-dm'
   19.10  
   19.11  #-----------------------------------------------------------------------------
   19.12 @@ -247,12 +247,14 @@ serial='pty'
   19.13  #vcpus=8
   19.14  #
   19.15  # Example for amd, expose a 5-core processor :
   19.16 -# cpuid = ['1:ebx=xxxxxxxx00000001xxxxxxxxxxxxxxxx,
   19.17 +# cpuid = ['1:ebx=xxxxxxxx00001010xxxxxxxxxxxxxxxx,
   19.18  #             edx=xxx1xxxxxxxxxxxxxxxxxxxxxxxxxxxx',
   19.19 -# '0x80000008:ecx=xxxxxxxxxxxxxxxx0000xxxx00001010']
   19.20 -#  - CPUID.1[EBX] : (Thread * Cores ) per processors
   19.21 -#  - CPUID.1[EDX][HT] : Enable HT
   19.22 -#  - CPUID.0x80000008[ECX] : Number of vcpus * 2
   19.23 +# '0x80000001:ecx=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1x',
   19.24 +# '0x80000008:ecx=xxxxxxxxxxxxxxxxxxxxxxxxxx001001']
   19.25 +#   - CPUID.1[EBX] : Threads per Core * Cores per Socket (2 * #vcpus)
   19.26 +#   - CPUID.1[EDX][HT] : Enable HT
   19.27 +#   - CPUID.0x80000001[CmpLegacy] : Use legacy method
   19.28 +#   - CPUID.0x80000008[ECX] : #vcpus * 2 - 1
   19.29  #vcpus=5
   19.30  #
   19.31  #  Downgrade the cpuid to make a better compatibility for migration :
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/tools/examples/xmexample.hvm-stubdom	Wed Jul 23 12:10:20 2008 +0900
    20.3 @@ -0,0 +1,320 @@
    20.4 +#  -*- mode: python; -*-
    20.5 +#============================================================================
    20.6 +# Python configuration setup for 'xm create'.
    20.7 +# This script sets the parameters used when a domain is created using 'xm create'.
    20.8 +# You use a separate script for each domain you want to create, or 
    20.9 +# you can set the parameters for the domain on the xm command line.
   20.10 +#============================================================================
   20.11 +#
   20.12 +# This is a version using a stubdomain for device model, see
   20.13 +# stubdom-ExampleHVMDomain and stubdom/README for more details
   20.14 +# The differences with xmexample.hvm are marked with "STUBDOM"
   20.15 +
   20.16 +#----------------------------------------------------------------------------
   20.17 +# Kernel image file.
   20.18 +kernel = "/usr/lib/xen/boot/hvmloader"
   20.19 +
   20.20 +# The domain build function. HVM domain uses 'hvm'.
   20.21 +builder='hvm'
   20.22 +
   20.23 +# Initial memory allocation (in megabytes) for the new domain.
   20.24 +#
   20.25 +# WARNING: Creating a domain with insufficient memory may cause out of
   20.26 +#          memory errors. The domain needs enough memory to boot kernel
   20.27 +#          and modules. Allocating less than 32MBs is not recommended.
   20.28 +memory = 128
   20.29 +
   20.30 +# Shadow pagetable memory for the domain, in MB.
    20.31 +# If not explicitly set, xend will pick an appropriate value.
   20.32 +# Should be at least 2KB per MB of domain memory, plus a few MB per vcpu.
   20.33 +# shadow_memory = 8
   20.34 +
   20.35 +# A name for your domain. All domains must have different names.
   20.36 +name = "ExampleHVMDomain"
   20.37 +
   20.38 +# 128-bit UUID for the domain.  The default behavior is to generate a new UUID
   20.39 +# on each call to 'xm create'.
   20.40 +#uuid = "06ed00fe-1162-4fc4-b5d8-11993ee4a8b9"
   20.41 +
   20.42 +#-----------------------------------------------------------------------------
   20.43 +# The number of cpus guest platform has, default=1
   20.44 +#vcpus=1
   20.45 +
   20.46 +# Enable/disable HVM guest PAE, default=1 (enabled)
   20.47 +#pae=1
   20.48 +
   20.49 +# Enable/disable HVM guest ACPI, default=1 (enabled)
   20.50 +#acpi=1
   20.51 +
   20.52 +# Enable/disable HVM APIC mode, default=1 (enabled)
   20.53 +# Note that this option is ignored if vcpus > 1
   20.54 +#apic=1
   20.55 +
   20.56 +# List of which CPUS this domain is allowed to use, default Xen picks
   20.57 +#cpus = ""         # leave to Xen to pick
   20.58 +#cpus = "0"        # all vcpus run on CPU0
   20.59 +#cpus = "0-3,5,^1" # all vcpus run on cpus 0,2,3,5
   20.60 +#cpus = ["2", "3"] # VCPU0 runs on CPU2, VCPU1 runs on CPU3
   20.61 +
   20.62 +# Optionally define mac and/or bridge for the network interfaces.
   20.63 +# Random MACs are assigned if not given.
   20.64 +#vif = [ 'type=ioemu, mac=00:16:3e:00:00:11, bridge=xenbr0, model=ne2k_pci' ]
    20.65 +# type=ioemu specifies that the NIC is an ioemu device, not netfront
   20.66 +vif = [ 'type=ioemu, bridge=xenbr0' ]
   20.67 +
   20.68 +#----------------------------------------------------------------------------
   20.69 +# Define the disk devices you want the domain to have access to, and
   20.70 +# what you want them accessible as.
   20.71 +# Each disk entry is of the form phy:UNAME,DEV,MODE
   20.72 +# where UNAME is the device, DEV is the device name the domain will see,
   20.73 +# and MODE is r for read-only, w for read-write.
   20.74 +#
   20.75 +# STUBDOM: do not put it here but in stubdom-ExampleHVMDomain
   20.76 +
   20.77 +#disk = [ 'phy:hda1,hda1,r' ]
   20.78 +#disk = [ 'file:/var/images/min-el3-i386.img,hda,w', ',hdc:cdrom,r' ]
   20.79 +
   20.80 +#----------------------------------------------------------------------------
   20.81 +# Configure the behaviour when a domain exits.  There are three 'reasons'
   20.82 +# for a domain to stop: poweroff, reboot, and crash.  For each of these you
   20.83 +# may specify:
   20.84 +#
   20.85 +#   "destroy",        meaning that the domain is cleaned up as normal;
   20.86 +#   "restart",        meaning that a new domain is started in place of the old
   20.87 +#                     one;
   20.88 +#   "preserve",       meaning that no clean-up is done until the domain is
   20.89 +#                     manually destroyed (using xm destroy, for example); or
   20.90 +#   "rename-restart", meaning that the old domain is not cleaned up, but is
   20.91 +#                     renamed and a new domain started in its place.
   20.92 +#
   20.93 +# In the event a domain stops due to a crash, you have the additional options:
   20.94 +#
   20.95 +#   "coredump-destroy", meaning dump the crashed domain's core and then destroy;
   20.96 +#   "coredump-restart', meaning dump the crashed domain's core and the restart.
   20.97 +#
   20.98 +# The default is
   20.99 +#
  20.100 +#   on_poweroff = 'destroy'
  20.101 +#   on_reboot   = 'restart'
  20.102 +#   on_crash    = 'restart'
  20.103 +#
  20.104 +# For backwards compatibility we also support the deprecated option restart
  20.105 +#
  20.106 +# restart = 'onreboot' means on_poweroff = 'destroy'
  20.107 +#                            on_reboot   = 'restart'
  20.108 +#                            on_crash    = 'destroy'
  20.109 +#
  20.110 +# restart = 'always'   means on_poweroff = 'restart'
  20.111 +#                            on_reboot   = 'restart'
  20.112 +#                            on_crash    = 'restart'
  20.113 +#
  20.114 +# restart = 'never'    means on_poweroff = 'destroy'
  20.115 +#                            on_reboot   = 'destroy'
  20.116 +#                            on_crash    = 'destroy'
  20.117 +
  20.118 +#on_poweroff = 'destroy'
  20.119 +#on_reboot   = 'restart'
  20.120 +#on_crash    = 'restart'
  20.121 +
  20.122 +#============================================================================
  20.123 +
  20.124 +# Device Model to be used
  20.125 +#
  20.126 +# STUBDOM: this is a script that creates the stub domain running the device
  20.127 +# model
  20.128 +device_model = '/usr/lib/xen/bin/stubdom-dm'
  20.129 +
  20.130 +#-----------------------------------------------------------------------------
  20.131 +# boot on floppy (a), hard disk (c), Network (n) or CD-ROM (d) 
  20.132 +# default: hard disk, cd-rom, floppy
  20.133 +#boot="cda"
  20.134 +
  20.135 +#-----------------------------------------------------------------------------
  20.136 +#  write to temporary files instead of disk image files
  20.137 +#snapshot=1
  20.138 +
  20.139 +#----------------------------------------------------------------------------
  20.140 +# enable SDL library for graphics, default = 0
  20.141 +# 
  20.142 +# STUBDOM: always disable since the stub domain doesn't have direct X access
  20.143 +sdl=0
  20.144 +
  20.145 +#----------------------------------------------------------------------------
  20.146 +# enable OpenGL for texture rendering inside the SDL window, default = 1
  20.147 +# valid only if sdl is enabled.
  20.148 +# 
  20.149 +# STUBDOM: always disable for the same reason
  20.150 +opengl=0
  20.151 +
  20.152 +#----------------------------------------------------------------------------
  20.153 +# enable VNC library for graphics, default = 1
  20.154 +vnc=0
  20.155 +
  20.156 +#----------------------------------------------------------------------------
  20.157 +# address that should be listened on for the VNC server if vnc is set.
  20.158 +# default is to use 'vnc-listen' setting from /etc/xen/xend-config.sxp
  20.159 +#vnclisten="127.0.0.1"
  20.160 +
  20.161 +#----------------------------------------------------------------------------
  20.162 +# set VNC display number, default = domid
  20.163 +#vncdisplay=1
  20.164 +
  20.165 +#----------------------------------------------------------------------------
  20.166 +# try to find an unused port for the VNC server, default = 1
  20.167 +#vncunused=1
  20.168 +
  20.169 +#----------------------------------------------------------------------------
  20.170 +# enable spawning vncviewer for domain's console
  20.171 +# (only valid when vnc=1), default = 0
  20.172 +#vncconsole=0
  20.173 +
  20.174 +#----------------------------------------------------------------------------
  20.175 +# set password for domain's VNC console
   20.176 +# default depends on vncpasswd in xend-config.sxp
  20.177 +vncpasswd=''
  20.178 +
  20.179 +#----------------------------------------------------------------------------
  20.180 +# no graphics, use serial port
  20.181 +#nographic=0
  20.182 +
  20.183 +#----------------------------------------------------------------------------
  20.184 +# enable stdvga, default = 0 (use cirrus logic device model)
  20.185 +stdvga=0
  20.186 +
  20.187 +#-----------------------------------------------------------------------------
   20.188 +#   serial port redirected to a pty device, /dev/pts/n
  20.189 +#   then xm console or minicom can connect
  20.190 +# 
  20.191 +# STUBDOM: always disable as the stub domain doesn't have access to dom0's
  20.192 +# ptys
  20.193 +#serial='pty'
  20.194 +
  20.195 +
  20.196 +#-----------------------------------------------------------------------------
   20.197 +#   Qemu Monitor, default is disabled
  20.198 +#   Use ctrl-alt-2 to connect
  20.199 +#monitor=1
  20.200 +
  20.201 +
  20.202 +#-----------------------------------------------------------------------------
  20.203 +#   enable sound card support, [sb16|es1370|all|..,..], default none
  20.204 +# 
  20.205 +# STUBDOM: not supported
  20.206 +#soundhw='sb16'
  20.207 +
  20.208 +
  20.209 +#-----------------------------------------------------------------------------
  20.210 +#    set the real time clock to local time [default=0 i.e. set to utc]
  20.211 +#localtime=1
  20.212 +
  20.213 +
  20.214 +#-----------------------------------------------------------------------------
  20.215 +#    set the real time clock offset in seconds [default=0 i.e. same as dom0]
  20.216 +#rtc_timeoffset=3600
  20.217 +
  20.218 +#-----------------------------------------------------------------------------
  20.219 +#    start in full screen
  20.220 +#full-screen=1   
  20.221 +
  20.222 +
  20.223 +#-----------------------------------------------------------------------------
  20.224 +#   Enable USB support (specific devices specified at runtime through the
  20.225 +#			monitor window)
  20.226 +#usb=1
  20.227 +
  20.228 +#   Enable USB mouse support (only enable one of the following, `mouse' for
  20.229 +#			      PS/2 protocol relative mouse, `tablet' for
  20.230 +#			      absolute mouse)
  20.231 +#usbdevice='mouse'
  20.232 +#usbdevice='tablet'
  20.233 +
  20.234 +#-----------------------------------------------------------------------------
  20.235 +#   Set keyboard layout, default is en-us keyboard. 
  20.236 +#keymap='ja'
  20.237 +
  20.238 +#-----------------------------------------------------------------------------
  20.239 +#   Configure guest CPUID responses:
  20.240 +#
  20.241 +#cpuid=[ '1:ecx=xxxxxxxxxxx00xxxxxxxxxxxxxxxxxxx,
  20.242 +#           eax=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' ]
  20.243 +# - Unset the SSE4 features (CPUID.1[ECX][20-19])
   20.244 +# - Default behaviour for all other bits in ECX and EAX registers.
  20.245 +# 
   20.246 +# Each successive character represents a less-significant bit:
  20.247 +#  '1' -> force the corresponding bit to 1
  20.248 +#  '0' -> force to 0
  20.249 +#  'x' -> Get a safe value (pass through and mask with the default policy)
  20.250 +#  'k' -> pass through the host bit value
  20.251 +#  's' -> as 'k' but preserve across save/restore and migration
  20.252 +# 
  20.253 +#   Expose to the guest multi-core cpu instead of multiple processors
   20.254 +# Example for intel, expose an 8-core processor:
  20.255 +#cpuid=['1:edx=xxx1xxxxxxxxxxxxxxxxxxxxxxxxxxxx,
  20.256 +#          ebx=xxxxxxxx00010000xxxxxxxxxxxxxxxx',
  20.257 +#     '4,0:eax=001111xxxxxxxxxxxxxxxxxxxxxxxxxx']
  20.258 +#  - CPUID.1[EDX][HT] : Enable HT
  20.259 +#  - CPUID.1[EBX] : Number of vcpus * 2
  20.260 +#  - CPUID.4,0[EAX] : Number of vcpus * 2 - 1
  20.261 +#vcpus=8
  20.262 +#
  20.263 +# Example for amd, expose a 5-core processor :
  20.264 +# cpuid = ['1:ebx=xxxxxxxx00001010xxxxxxxxxxxxxxxx,
  20.265 +#             edx=xxx1xxxxxxxxxxxxxxxxxxxxxxxxxxxx',
  20.266 +# '0x80000001:ecx=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1x',
  20.267 +# '0x80000008:ecx=xxxxxxxxxxxxxxxxxxxxxxxxxx001001']
  20.268 +#   - CPUID.1[EBX] : Threads per Core * Cores per Socket (2 * #vcpus)
  20.269 +#   - CPUID.1[EDX][HT] : Enable HT
  20.270 +#   - CPUID.0x80000001[CmpLegacy] : Use legacy method
  20.271 +#   - CPUID.0x80000008[ECX] : #vcpus * 2 - 1
  20.272 +#vcpus=5
  20.273 +#
  20.274 +#  Downgrade the cpuid to make a better compatibility for migration :
  20.275 +# Look like a generic 686 :
  20.276 +# cpuid = [ '0:eax=0x3,ebx=0x0,ecx=0x0,edx=0x0',
  20.277 +#           '1:eax=0x06b1,
  20.278 +#              ecx=xxxxxxxxxx0000xx00xxx0000000xx0,
  20.279 +#              edx=xx00000xxxxxxx0xxxxxxxxx0xxxxxx',
  20.280 +#           '4:eax=0x3,ebx=0x0,ecx=0x0,edx=0x0',
  20.281 +#  '0x80000000:eax=0x3,ebx=0x0,ecx=0x0,edx=0x0']
  20.282 +#  with the highest leaf
  20.283 +#  - CPUID.0[EAX] : Set the highest leaf
  20.284 +#  - CPUID.1[EAX] : 686 
  20.285 +#  - CPUID.1[ECX] : Mask some features
  20.286 +#  - CPUID.1[EDX] : Mask some features
  20.287 +#  - CPUID.4 : Reply like the highest leaf, in our case CPUID.3
   20.288 +#  - CPUID.0x80000000 : No extensions; we are on a Pentium III, reply like the
  20.289 +#  highest leaf (CPUID.3).
  20.290 +#
  20.291 +#   Configure host CPUID consistency checks, which must be satisfied for this
  20.292 +#   VM to be allowed to run on this host's processor type:
  20.293 +#cpuid_check=[ '1:ecx=xxxxxxxxxxxxxxxxxxxxxxxxxx1xxxxx' ]
  20.294 +# - Host must have VMX feature flag set
  20.295 +#
  20.296 +# The format is similar to the above for 'cpuid':
  20.297 +#  '1' -> the bit must be '1'
  20.298 +#  '0' -> the bit must be '0'
  20.299 +#  'x' -> we don't care (do not check)
  20.300 +#  's' -> the bit must be the same as on the host that started this VM
  20.301 +
  20.302 +
  20.303 +#-----------------------------------------------------------------------------
  20.304 +#   Configure PVSCSI devices:
  20.305 +#
  20.306 +#vscsi=[ 'PDEV, VDEV' ]
  20.307 +#
   20.308 +#   PDEV   gives the physical SCSI device to be attached to the specified
   20.309 +#          guest domain, in one of the following identifier formats:
  20.310 +#          - XX:XX:XX:XX (4-tuples with decimal notation which shows
  20.311 +#                          "host:channel:target:lun")
  20.312 +#          - /dev/sdxx or sdx
  20.313 +#          - /dev/stxx or stx
  20.314 +#          - /dev/sgxx or sgx
  20.315 +#          - result of 'scsi_id -gu -s'.
  20.316 +#            ex. # scsi_id -gu -s /block/sdb
  20.317 +#                  36000b5d0006a0000006a0257004c0000
  20.318 +#
   20.319 +#   VDEV   gives the virtual SCSI device as a 4-tuple (XX:XX:XX:XX) under
   20.320 +#          which the specified guest domain will recognize it.
  20.321 +#
  20.322 +
  20.323 +#vscsi = [ '/dev/sdx, 0:0:0:0' ]
    21.1 --- a/tools/flask/libflask/flask_op.c	Wed Jul 23 11:21:47 2008 +0900
    21.2 +++ b/tools/flask/libflask/flask_op.c	Wed Jul 23 12:10:20 2008 +0900
    21.3 @@ -22,7 +22,7 @@
    21.4  #include <flask.h>
    21.5  #include <xenctrl.h>
    21.6  
    21.7 -int flask_load(int xc_handle, char *buf, int size)
    21.8 +int flask_load(int xc_handle, char *buf, uint32_t size)
    21.9  {
   21.10      int err;
   21.11      flask_op_t op;
   21.12 @@ -37,7 +37,7 @@ int flask_load(int xc_handle, char *buf,
   21.13      return 0;
   21.14  }
   21.15  
   21.16 -int flask_context_to_sid(int xc_handle, char *buf, int size, uint32_t *sid)
   21.17 +int flask_context_to_sid(int xc_handle, char *buf, uint32_t size, uint32_t *sid)
   21.18  {
   21.19      int err;
   21.20      flask_op_t op;
   21.21 @@ -54,7 +54,7 @@ int flask_context_to_sid(int xc_handle, 
   21.22      return 0;
   21.23  }
   21.24  
   21.25 -int flask_sid_to_context(int xc_handle, int sid, char *buf, int size)
   21.26 +int flask_sid_to_context(int xc_handle, int sid, char *buf, uint32_t size)
   21.27  {
   21.28      int err;
   21.29      flask_op_t op;
    22.1 --- a/tools/flask/libflask/include/flask.h	Wed Jul 23 11:21:47 2008 +0900
    22.2 +++ b/tools/flask/libflask/include/flask.h	Wed Jul 23 12:10:20 2008 +0900
    22.3 @@ -15,8 +15,8 @@
    22.4  #include <xen/xen.h>
    22.5  #include <xen/xsm/flask_op.h>
    22.6  
    22.7 -int flask_load(int xc_handle, char *buf, int size);
    22.8 -int flask_context_to_sid(int xc_handle, char *buf, int size, uint32_t *sid);
    22.9 -int flask_sid_to_context(int xc_handle, int sid, char *buf, int size);
   22.10 +int flask_load(int xc_handle, char *buf, uint32_t size);
   22.11 +int flask_context_to_sid(int xc_handle, char *buf, uint32_t size, uint32_t *sid);
   22.12 +int flask_sid_to_context(int xc_handle, int sid, char *buf, uint32_t size);
   22.13  
   22.14  #endif /* __FLASK_H__ */
    23.1 --- a/tools/fs-back/fs-backend.c	Wed Jul 23 11:21:47 2008 +0900
    23.2 +++ b/tools/fs-back/fs-backend.c	Wed Jul 23 12:10:20 2008 +0900
    23.3 @@ -239,7 +239,7 @@ static void handle_connection(int fronte
    23.4                                      mount->dom_id,
    23.5                                      mount->gref,
    23.6                                      PROT_READ | PROT_WRITE);
    23.7 -    BACK_RING_INIT(&mount->ring, sring, PAGE_SIZE);
    23.8 +    BACK_RING_INIT(&mount->ring, sring, XC_PAGE_SIZE);
    23.9      mount->nr_entries = mount->ring.nr_ents; 
   23.10      for (i = 0; i < MAX_FDS; i++)
   23.11          mount->fds[i] = -1;
    24.1 --- a/tools/fs-back/fs-ops.c	Wed Jul 23 11:21:47 2008 +0900
    24.2 +++ b/tools/fs-back/fs-ops.c	Wed Jul 23 12:10:20 2008 +0900
    24.3 @@ -515,9 +515,9 @@ void dispatch_list(struct mount *mount, 
    24.4      /* If there was any error with reading the directory, errno will be set */
    24.5      error_code = errno;
    24.6      /* Copy file names of the remaining non-NULL dirents into buf */
    24.7 -    assert(NAME_MAX < PAGE_SIZE >> 1);
    24.8 +    assert(NAME_MAX < XC_PAGE_SIZE >> 1);
    24.9      while(dirent != NULL && 
   24.10 -            (PAGE_SIZE - ((unsigned long)buf & PAGE_MASK) > NAME_MAX))
   24.11 +            (XC_PAGE_SIZE - ((unsigned long)buf & XC_PAGE_MASK) > NAME_MAX))
   24.12      {
   24.13          int curr_length = strlen(dirent->d_name) + 1;
   24.14          
    25.1 --- a/tools/ioemu/hw/pc.c	Wed Jul 23 11:21:47 2008 +0900
    25.2 +++ b/tools/ioemu/hw/pc.c	Wed Jul 23 12:10:20 2008 +0900
    25.3 @@ -591,9 +591,9 @@ static void load_linux(const char *kerne
    25.4              "qemu: real_addr     = %#zx\n"
    25.5              "qemu: cmdline_addr  = %#zx\n"
    25.6              "qemu: prot_addr     = %#zx\n",
    25.7 -            real_addr,
    25.8 -            cmdline_addr,
    25.9 -            prot_addr);
   25.10 +            (size_t)real_addr,
   25.11 +            (size_t)cmdline_addr,
   25.12 +            (size_t)prot_addr);
   25.13  
   25.14      /* highest address for loading the initrd */
   25.15      if (protocol >= 0x203)
    26.1 --- a/tools/ioemu/hw/vga.c	Wed Jul 23 11:21:47 2008 +0900
    26.2 +++ b/tools/ioemu/hw/vga.c	Wed Jul 23 12:10:20 2008 +0900
    26.3 @@ -1511,51 +1511,52 @@ static void vga_draw_graphic(VGAState *s
    26.4             width, height, v, line_offset, s->cr[9], s->cr[0x17], s->line_compare, s->sr[0x01]);
    26.5  #endif
    26.6  
    26.7 -    if (height - 1 > s->line_compare || multi_run || (s->cr[0x17] & 3) != 3
    26.8 -            || !s->lfb_addr) {
    26.9 -        /* Tricky things happen, just track all video memory */
   26.10 -        start = 0;
   26.11 -        end = s->vram_size;
   26.12 -    } else {
   26.13 -        /* Tricky things won't have any effect, i.e. we are in the very simple
   26.14 -         * (and very usual) case of a linear buffer. */
   26.15 -        /* use page table dirty bit tracking for the LFB plus border */
   26.16 -        start = (s->start_addr * 4) & TARGET_PAGE_MASK;
   26.17 -        end = ((s->start_addr * 4 + height * line_offset) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
   26.18 -    }
   26.19 -
   26.20 -    for (y = 0 ; y < start; y += TARGET_PAGE_SIZE)
   26.21 -        /* We will not read that anyway. */
   26.22 -        cpu_physical_memory_set_dirty(s->vram_offset + y);
   26.23 +    if (s->lfb_addr) {
   26.24 +        if (height - 1 > s->line_compare || multi_run || (s->cr[0x17] & 3) != 3) {
   26.25 +            /* Tricky things happen, just track all video memory */
   26.26 +            start = 0;
   26.27 +            end = s->vram_size;
   26.28 +        } else {
   26.29 +            /* Tricky things won't have any effect, i.e. we are in the very simple
   26.30 +             * (and very usual) case of a linear buffer. */
   26.31 +            /* use page table dirty bit tracking for the LFB plus border */
   26.32 +            start = (s->start_addr * 4) & TARGET_PAGE_MASK;
   26.33 +            end = ((s->start_addr * 4 + height * line_offset) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
   26.34 +        }
   26.35  
   26.36 -    {
   26.37 -        unsigned long npages = (end - y) / TARGET_PAGE_SIZE;
   26.38 -        const int width = sizeof(unsigned long) * 8;
   26.39 -        unsigned long bitmap[(npages + width - 1) / width];
   26.40 -        int err;
   26.41 +        for (y = 0 ; y < start; y += TARGET_PAGE_SIZE)
   26.42 +            /* We will not read that anyway. */
   26.43 +            cpu_physical_memory_set_dirty(s->vram_offset + y);
   26.44  
   26.45 -        if (!(err = xc_hvm_track_dirty_vram(xc_handle, domid,
   26.46 -                    (s->lfb_addr + y) / TARGET_PAGE_SIZE, npages, bitmap))) {
   26.47 -            int i, j;
   26.48 -            for (i = 0; i < sizeof(bitmap) / sizeof(*bitmap); i++) {
   26.49 -                unsigned long map = bitmap[i];
   26.50 -                for (j = i * width; map && j < npages; map >>= 1, j++)
   26.51 -                    if (map & 1)
   26.52 -                        cpu_physical_memory_set_dirty(s->vram_offset + y
   26.53 -                            + j * TARGET_PAGE_SIZE);
   26.54 +        {
   26.55 +            unsigned long npages = (end - y) / TARGET_PAGE_SIZE;
   26.56 +            const int width = sizeof(unsigned long) * 8;
   26.57 +            unsigned long bitmap[(npages + width - 1) / width];
   26.58 +            int err;
   26.59 +
   26.60 +            if (!(err = xc_hvm_track_dirty_vram(xc_handle, domid,
   26.61 +                        (s->lfb_addr + y) / TARGET_PAGE_SIZE, npages, bitmap))) {
   26.62 +                int i, j;
   26.63 +                for (i = 0; i < sizeof(bitmap) / sizeof(*bitmap); i++) {
   26.64 +                    unsigned long map = bitmap[i];
   26.65 +                    for (j = i * width; map && j < npages; map >>= 1, j++)
   26.66 +                        if (map & 1)
   26.67 +                            cpu_physical_memory_set_dirty(s->vram_offset + y
   26.68 +                                + j * TARGET_PAGE_SIZE);
   26.69 +                }
   26.70 +                y += npages * TARGET_PAGE_SIZE;
   26.71 +            } else {
   26.72 +                /* ENODATA just means we have changed mode and will succeed
   26.73 +                 * next time */
   26.74 +                if (err != -ENODATA)
   26.75 +                    fprintf(stderr, "track_dirty_vram(%lx, %lx) failed (%d)\n", s->lfb_addr + y, npages, err);
   26.76              }
   26.77 -            y += npages * TARGET_PAGE_SIZE;
   26.78 -        } else {
   26.79 -            /* ENODATA just means we have changed mode and will succeed
   26.80 -             * next time */
   26.81 -            if (err != -ENODATA)
   26.82 -                fprintf(stderr, "track_dirty_vram(%lx, %lx) failed (%d)\n", s->lfb_addr + y, npages, err);
   26.83          }
   26.84 -    }
   26.85  
   26.86 -    for ( ; y < s->vram_size; y += TARGET_PAGE_SIZE)
   26.87 -        /* We will not read that anyway. */
   26.88 -        cpu_physical_memory_set_dirty(s->vram_offset + y);
   26.89 +        for ( ; y < s->vram_size; y += TARGET_PAGE_SIZE)
   26.90 +            /* We will not read that anyway. */
   26.91 +            cpu_physical_memory_set_dirty(s->vram_offset + y);
   26.92 +    }
   26.93  
   26.94      addr1 = (s->start_addr * 4);
   26.95      bwidth = (width * bits + 7) / 8;
    27.1 --- a/tools/libxc/xc_linux.c	Wed Jul 23 11:21:47 2008 +0900
    27.2 +++ b/tools/libxc/xc_linux.c	Wed Jul 23 12:10:20 2008 +0900
    27.3 @@ -418,9 +418,10 @@ void *xc_gnttab_map_grant_ref(int xcg_ha
    27.4      return addr;
    27.5  }
    27.6  
    27.7 -void *xc_gnttab_map_grant_refs(int xcg_handle,
    27.8 +static void *do_gnttab_map_grant_refs(int xcg_handle,
    27.9                                 uint32_t count,
   27.10                                 uint32_t *domids,
   27.11 +                               int domids_stride,
   27.12                                 uint32_t *refs,
   27.13                                 int prot)
   27.14  {
   27.15 @@ -435,7 +436,7 @@ void *xc_gnttab_map_grant_refs(int xcg_h
   27.16  
   27.17      for ( i = 0; i < count; i++ )
   27.18      {
   27.19 -        map->refs[i].domid = domids[i];
   27.20 +        map->refs[i].domid = domids[i * domids_stride];
   27.21          map->refs[i].ref   = refs[i];
   27.22      }
   27.23  
   27.24 @@ -464,6 +465,24 @@ void *xc_gnttab_map_grant_refs(int xcg_h
   27.25      return addr;
   27.26  }
   27.27  
   27.28 +void *xc_gnttab_map_grant_refs(int xcg_handle,
   27.29 +                               uint32_t count,
   27.30 +                               uint32_t *domids,
   27.31 +                               uint32_t *refs,
   27.32 +                               int prot)
   27.33 +{
   27.34 +    return do_gnttab_map_grant_refs(xcg_handle, count, domids, 1, refs, prot);
   27.35 +}
   27.36 +
   27.37 +void *xc_gnttab_map_domain_grant_refs(int xcg_handle,
   27.38 +                               uint32_t count,
   27.39 +                               uint32_t domid,
   27.40 +                               uint32_t *refs,
   27.41 +                               int prot)
   27.42 +{
   27.43 +    return do_gnttab_map_grant_refs(xcg_handle, count, &domid, 0, refs, prot);
   27.44 +}
   27.45 +
   27.46  int xc_gnttab_munmap(int xcg_handle,
   27.47                       void *start_address,
   27.48                       uint32_t count)
    28.1 --- a/tools/libxc/xenctrl.h	Wed Jul 23 11:21:47 2008 +0900
    28.2 +++ b/tools/libxc/xenctrl.h	Wed Jul 23 12:10:20 2008 +0900
    28.3 @@ -865,6 +865,23 @@ void *xc_gnttab_map_grant_refs(int xcg_h
    28.4                                 uint32_t *refs,
    28.5                                 int prot);
    28.6  
    28.7 +/**
    28.8 + * Memory maps one or more grant references from one domain to a
    28.9 + * contiguous local address range. Mappings should be unmapped with
   28.10 + * xc_gnttab_munmap.  Returns NULL on failure.
   28.11 + *
   28.12 + * @parm xcg_handle a handle on an open grant table interface
   28.13 + * @parm count the number of grant references to be mapped
   28.14 + * @parm domid the domain to map memory from
   28.15 + * @parm refs an array of @count grant references to be mapped
   28.16 + * @parm prot same flag as in mmap()
   28.17 + */
   28.18 +void *xc_gnttab_map_domain_grant_refs(int xcg_handle,
   28.19 +                                      uint32_t count,
   28.20 +                                      uint32_t domid,
   28.21 +                                      uint32_t *refs,
   28.22 +                                      int prot);
   28.23 +
   28.24  /*
   28.25   * Unmaps the @count pages starting at @start_address, which were mapped by a
   28.26   * call to xc_gnttab_map_grant_ref or xc_gnttab_map_grant_refs. Returns zero
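
The new declaration mirrors xc_gnttab_map_grant_refs but takes a single source domain. A hedged usage sketch follows; the grant reference numbers and domid are made up for illustration, and a real tool would obtain xcg_handle from xc_gnttab_open():

    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>
    #include "xenctrl.h"

    /* Map three grant refs offered by a hypothetical domain 5, copy the
     * first page out, then unmap the whole range again. */
    int copy_first_page(int xcg_handle, void *dst)
    {
        uint32_t refs[3] = { 8, 9, 10 };
        void *addr = xc_gnttab_map_domain_grant_refs(xcg_handle, 3, 5,
                                                     refs, PROT_READ);
        if ( addr == NULL )
            return -1;

        memcpy(dst, addr, 4096);
        return xc_gnttab_munmap(xcg_handle, addr, 3);
    }
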
    29.1 --- a/tools/python/xen/xend/XendNode.py	Wed Jul 23 11:21:47 2008 +0900
    29.2 +++ b/tools/python/xen/xend/XendNode.py	Wed Jul 23 12:10:20 2008 +0900
    29.3 @@ -23,6 +23,7 @@ import xen.lowlevel.xc
    29.4  from xen.util import Brctl
    29.5  from xen.util import pci as PciUtil
    29.6  from xen.xend import XendAPIStore
    29.7 +from xen.xend import osdep
    29.8  
    29.9  import uuid, arch
   29.10  from XendPBD import XendPBD
   29.11 @@ -91,7 +92,7 @@ class XendNode:
   29.12          for cpu_uuid, cpu in saved_cpus.items():
   29.13              self.cpus[cpu_uuid] = cpu
   29.14  
   29.15 -        cpuinfo = parse_proc_cpuinfo()
   29.16 +        cpuinfo = osdep.get_cpuinfo()
   29.17          physinfo = self.physinfo_dict()
   29.18          cpu_count = physinfo['nr_cpus']
   29.19          cpu_features = physinfo['hw_caps']
   29.20 @@ -743,31 +744,6 @@ class XendNode:
   29.21      def info_dict(self):
   29.22          return dict(self.info())
   29.23  
   29.24 -def parse_proc_cpuinfo():
   29.25 -    cpuinfo = {}
   29.26 -    f = file('/proc/cpuinfo', 'r')
   29.27 -    try:
   29.28 -        p = -1
   29.29 -        d = {}
   29.30 -        for line in f:
   29.31 -            keyvalue = line.split(':')
   29.32 -            if len(keyvalue) != 2:
   29.33 -                continue
   29.34 -            key = keyvalue[0].strip()
   29.35 -            val = keyvalue[1].strip()
   29.36 -            if key == 'processor':
   29.37 -                if p != -1:
   29.38 -                    cpuinfo[p] = d
   29.39 -                p = int(val)
   29.40 -                d = {}
   29.41 -            else:
   29.42 -                d[key] = val
   29.43 -        cpuinfo[p] = d
   29.44 -        return cpuinfo
   29.45 -    finally:
   29.46 -        f.close()
   29.47 -
   29.48 -
   29.49  def instance():
   29.50      global inst
   29.51      try:
    30.1 --- a/tools/python/xen/xend/balloon.py	Wed Jul 23 11:21:47 2008 +0900
    30.2 +++ b/tools/python/xen/xend/balloon.py	Wed Jul 23 12:10:20 2008 +0900
    30.3 @@ -39,11 +39,11 @@ SLEEP_TIME_GROWTH = 0.1
    30.4  
    30.5  # A mapping between easy-to-remember labels and the more verbose
    30.6  # label actually shown in the PROC_XEN_BALLOON file.
    30.7 -labels = { 'current'      : 'Current allocation',
    30.8 -           'target'       : 'Requested target',
    30.9 -           'low-balloon'  : 'Low-mem balloon',
   30.10 -           'high-balloon' : 'High-mem balloon',
   30.11 -           'limit'        : 'Xen hard limit' }
   30.12 +#labels = { 'current'      : 'Current allocation',
   30.13 +#           'target'       : 'Requested target',
   30.14 +#           'low-balloon'  : 'Low-mem balloon',
   30.15 +#           'high-balloon' : 'High-mem balloon',
   30.16 +#           'limit'        : 'Xen hard limit' }
   30.17  
   30.18  def _get_proc_balloon(label):
   30.19      """Returns the value for the named label.  Returns None if the label was
   30.20 @@ -54,7 +54,7 @@ def _get_proc_balloon(label):
   30.21  def get_dom0_current_alloc():
   30.22      """Returns the current memory allocation (in KiB) of dom0."""
   30.23  
   30.24 -    kb = _get_proc_balloon(labels['current'])
   30.25 +    kb = _get_proc_balloon('current')
   30.26      if kb == None:
   30.27          raise VmError('Failed to query current memory allocation of dom0.')
   30.28      return kb
   30.29 @@ -62,7 +62,7 @@ def get_dom0_current_alloc():
   30.30  def get_dom0_target_alloc():
   30.31      """Returns the target memory allocation (in KiB) of dom0."""
   30.32  
   30.33 -    kb = _get_proc_balloon(labels['target'])
   30.34 +    kb = _get_proc_balloon('target')
   30.35      if kb == None:
   30.36          raise VmError('Failed to query target memory allocation of dom0.')
   30.37      return kb
    31.1 --- a/tools/python/xen/xend/image.py	Wed Jul 23 11:21:47 2008 +0900
    31.2 +++ b/tools/python/xen/xend/image.py	Wed Jul 23 12:10:20 2008 +0900
    31.3 @@ -265,9 +265,12 @@ class ImageHandler:
    31.4          has_vnc = int(vmConfig['platform'].get('vnc', 0)) != 0
    31.5          has_sdl = int(vmConfig['platform'].get('sdl', 0)) != 0
    31.6          opengl = 1
    31.7 +        keymap = vmConfig['platform'].get("keymap")
    31.8          for dev_uuid in vmConfig['console_refs']:
    31.9              dev_type, dev_info = vmConfig['devices'][dev_uuid]
   31.10              if dev_type == 'vfb':
   31.11 +                if 'keymap' in dev_info:
   31.12 +                    keymap = dev_info.get('keymap',{})
   31.13                  vfb_type = dev_info.get('type', {})
   31.14                  if vfb_type == 'sdl':
   31.15                      self.display = dev_info.get('display', {})
   31.16 @@ -279,7 +282,6 @@ class ImageHandler:
   31.17                      has_vnc = True
   31.18                  break
   31.19  
   31.20 -        keymap = vmConfig['platform'].get("keymap")
   31.21          if keymap:
   31.22              ret.append("-k")
   31.23              ret.append(keymap)
    32.1 --- a/tools/python/xen/xend/osdep.py	Wed Jul 23 11:21:47 2008 +0900
    32.2 +++ b/tools/python/xen/xend/osdep.py	Wed Jul 23 12:10:20 2008 +0900
    32.3 @@ -41,12 +41,18 @@ import os
    32.4  def _linux_balloon_stat(label):
    32.5      """Returns the value for the named label, or None if an error occurs."""
    32.6  
    32.7 +    xend2linux_labels = { 'current'      : 'Current allocation',
    32.8 +                          'target'       : 'Requested target',
    32.9 +                          'low-balloon'  : 'Low-mem balloon',
   32.10 +                          'high-balloon' : 'High-mem balloon',
   32.11 +                          'limit'        : 'Xen hard limit' }
   32.12 +
   32.13      PROC_XEN_BALLOON = '/proc/xen/balloon'
   32.14      f = file(PROC_XEN_BALLOON, 'r')
   32.15      try:
   32.16          for line in f:
   32.17              keyvalue = line.split(':')
   32.18 -            if keyvalue[0] == label:
   32.19 +            if keyvalue[0] == xend2linux_labels[label]:
   32.20                  values = keyvalue[1].split()
   32.21                  if values[0].isdigit():
   32.22                      return int(values[0])
   32.23 @@ -67,11 +73,11 @@ def _solaris_balloon_stat(label):
   32.24      BLN_IOCTL_LOW = 0x42410003
   32.25      BLN_IOCTL_HIGH = 0x42410004
   32.26      BLN_IOCTL_LIMIT = 0x42410005
   32.27 -    label_to_ioctl = { 'Current allocation' : BLN_IOCTL_CURRENT,
   32.28 -                       'Requested target'   : BLN_IOCTL_TARGET,
   32.29 -                       'Low-mem balloon'    : BLN_IOCTL_LOW,
   32.30 -                       'High-mem balloon'   : BLN_IOCTL_HIGH,
   32.31 -                       'Xen hard limit'     : BLN_IOCTL_LIMIT }
   32.32 +    label_to_ioctl = { 'current'      : BLN_IOCTL_CURRENT,
   32.33 +                       'target'       : BLN_IOCTL_TARGET,
   32.34 +                       'low-balloon'  : BLN_IOCTL_LOW,
   32.35 +                       'high-balloon' : BLN_IOCTL_HIGH,
   32.36 +                       'limit'        : BLN_IOCTL_LIMIT }
   32.37  
   32.38      f = file(DEV_XEN_BALLOON, 'r')
   32.39      try:
   32.40 @@ -87,6 +93,33 @@ def _solaris_balloon_stat(label):
   32.41      "SunOS": _solaris_balloon_stat
   32.42  }
   32.43  
   32.44 +def _linux_get_cpuinfo():
   32.45 +    cpuinfo = {}
   32.46 +    f = file('/proc/cpuinfo', 'r')
   32.47 +    try:    
   32.48 +        p = -1  
   32.49 +        d = {}  
   32.50 +        for line in f:
   32.51 +            keyvalue = line.split(':')
   32.52 +            if len(keyvalue) != 2:
   32.53 +                continue 
   32.54 +            key = keyvalue[0].strip()
   32.55 +            val = keyvalue[1].strip()
   32.56 +            if key == 'processor':
   32.57 +                if p != -1:
   32.58 +                    cpuinfo[p] = d
   32.59 +                p = int(val)
   32.60 +                d = {}
   32.61 +            else:
   32.62 +                d[key] = val
   32.63 +        cpuinfo[p] = d
   32.64 +        return cpuinfo
   32.65 +    finally:
   32.66 +        f.close()
   32.67 +
   32.68 +_get_cpuinfo = {
   32.69 +}
   32.70 +
   32.71  def _get(var, default=None):
   32.72      return var.get(os.uname()[0], default)
   32.73  
   32.74 @@ -95,3 +128,4 @@ xend_autorestart = _get(_xend_autorestar
   32.75  pygrub_path = _get(_pygrub_path, "/usr/bin/pygrub")
   32.76  vif_script = _get(_vif_script, "vif-bridge")
   32.77  lookup_balloon_stat = _get(_balloon_stat, _linux_balloon_stat)
   32.78 +get_cpuinfo = _get(_get_cpuinfo, _linux_get_cpuinfo)
    33.1 --- a/tools/python/xen/xm/create.py	Wed Jul 23 11:21:47 2008 +0900
    33.2 +++ b/tools/python/xen/xm/create.py	Wed Jul 23 12:10:20 2008 +0900
    33.3 @@ -325,7 +325,7 @@ gopts.var('irq', val='IRQ',
    33.4           For example 'irq=7'.
    33.5           This option may be repeated to add more than one IRQ.""")
    33.6  
    33.7 -gopts.var('vfb', val="type={vnc,sdl},vncunused=1,vncdisplay=N,vnclisten=ADDR,display=DISPLAY,xauthority=XAUTHORITY,vncpasswd=PASSWORD,opengl=1",
    33.8 +gopts.var('vfb', val="type={vnc,sdl},vncunused=1,vncdisplay=N,vnclisten=ADDR,display=DISPLAY,xauthority=XAUTHORITY,vncpasswd=PASSWORD,opengl=1,keymap=FILE",
    33.9            fn=append_value, default=[],
   33.10            use="""Make the domain a framebuffer backend.
   33.11            The backend type should be either sdl or vnc.
   33.12 @@ -336,7 +336,8 @@ gopts.var('vfb', val="type={vnc,sdl},vnc
   33.13            default password.
   33.14            For type=sdl, a viewer will be started automatically using the
   33.15            given DISPLAY and XAUTHORITY, which default to the current user's
   33.16 -          ones.  OpenGL will be used by default unless opengl is set to 0.""")
   33.17 +          ones.  OpenGL will be used by default unless opengl is set to 0.
    33.18 +          keymap overrides the xend-configured default layout file.""")
   33.19  
   33.20  gopts.var('vif', val="type=TYPE,mac=MAC,bridge=BRIDGE,ip=IPADDR,script=SCRIPT," + \
   33.21            "backend=DOM,vifname=NAME,rate=RATE,model=MODEL,accel=ACCEL",
   33.22 @@ -741,7 +742,7 @@ def configure_vfbs(config_devs, vals):
   33.23          for (k,v) in d.iteritems():
   33.24              if not k in [ 'vnclisten', 'vncunused', 'vncdisplay', 'display',
   33.25                            'videoram', 'xauthority', 'type', 'vncpasswd',
   33.26 -                          'opengl' ]:
   33.27 +                          'opengl', 'keymap' ]:
   33.28                  err("configuration option %s unknown to vfbs" % k)
   33.29              config.append([k,v])
   33.30          if not d.has_key("keymap"):
   33.31 @@ -955,6 +956,10 @@ def preprocess_cpuid(vals, attr_name):
   33.32                  if reg_match == None:
   33.33                      err("cpuid's syntax is (eax|ebx|ecx|edx)=value")
   33.34                  res = reg_match.groupdict()
   33.35 +                if (res['val'][:2] != '0x' and len(res['val']) != 32):
    33.36 +                    err("cpuid: all bits of register %s must be " \
    33.37 +                        "specified for input %s\n"
   33.38 +                        % (res['reg'], input) )
   33.39                  cpuid[input][res['reg']] = res['val'] # new register
   33.40      setattr(vals, attr_name, cpuid)
   33.41  
    34.1 --- a/xen/arch/x86/acpi/cpufreq/cpufreq.c	Wed Jul 23 11:21:47 2008 +0900
    34.2 +++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c	Wed Jul 23 12:10:20 2008 +0900
    34.3 @@ -237,9 +237,9 @@ static u32 get_cur_val(cpumask_t mask)
    34.4   * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
    34.5   * no meaning should be associated with absolute values of these MSRs.
    34.6   */
    34.7 -/* FIXME: handle query on non-current cpu later */
    34.8 -static unsigned int get_measured_perf(unsigned int cpu)
    34.9 +static void  __get_measured_perf(void *perf_percent)
   34.10  {
   34.11 +    unsigned int *ratio = perf_percent;
   34.12      union {
   34.13          struct {
   34.14              uint32_t lo;
   34.15 @@ -248,9 +248,6 @@ static unsigned int get_measured_perf(un
   34.16          uint64_t whole;
   34.17      } aperf_cur, mperf_cur;
   34.18  
   34.19 -    unsigned int perf_percent;
   34.20 -    unsigned int retval;
   34.21 -
   34.22      rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
   34.23      rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
   34.24  
   34.25 @@ -264,10 +261,21 @@ static unsigned int get_measured_perf(un
   34.26      }
   34.27  
   34.28      if (aperf_cur.whole && mperf_cur.whole)
   34.29 -        perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
   34.30 +        *ratio = (aperf_cur.whole * 100) / mperf_cur.whole;
   34.31      else
   34.32 -        perf_percent = 0;
   34.33 +        *ratio = 0;
   34.34 +}
   34.35  
   34.36 +static unsigned int get_measured_perf(unsigned int cpu)
   34.37 +{
   34.38 +    unsigned int retval, perf_percent;
   34.39 +    cpumask_t cpumask;
   34.40 +
   34.41 +    if (!cpu_online(cpu))
   34.42 +        return 0;
   34.43 +
   34.44 +    cpumask = cpumask_of_cpu(cpu);
   34.45 +    on_selected_cpus(cpumask, __get_measured_perf, (void *)&perf_percent,0,1);
   34.46  
   34.47      retval = drv_data[cpu]->max_freq * perf_percent / 100;
   34.48      return retval;
   34.49 @@ -433,16 +441,13 @@ acpi_cpufreq_cpu_init(struct cpufreq_pol
   34.50      perf = data->acpi_data;
   34.51      policy->shared_type = perf->shared_type;
   34.52  
   34.53 -    /*
   34.54 -     * Will let policy->cpus know about dependency only when software
   34.55 -     * coordination is required.
    34.56 +    /*
    34.57 +     * The latest Linux kernel (2.6.26) still has issues handling
    34.58 +     * _psd HW_ALL coordination.  In the Xen hypervisor we therefore
    34.59 +     * handle _psd HW_ALL coordination the same way as _psd SW_ALL
    34.60 +     * coordination, for the sake of safety.
    34.61 +     */
   34.62 -    if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
   34.63 -        policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
   34.64 -        policy->cpus = perf->shared_cpu_map;
   34.65 -    } else {
   34.66 -        policy->cpus = cpumask_of_cpu(cpu);    
   34.67 -    }
   34.68 +    policy->cpus = perf->shared_cpu_map;
   34.69  
   34.70      /* capability check */
   34.71      if (perf->state_count <= 1) {
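
get_measured_perf() now runs the MSR reads on the target CPU via on_selected_cpus() and then scales the driver's maximum frequency by the APERF/MPERF ratio. A worked example of that arithmetic with made-up MSR deltas and a 2.4 GHz maximum:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Illustrative deltas of IA32_APERF and IA32_MPERF since the last
         * sample, plus a 2.4 GHz max frequency expressed in kHz. */
        uint64_t aperf_delta = 3000000;
        uint64_t mperf_delta = 4000000;
        unsigned int max_freq = 2400000;

        unsigned int perf_percent =
            mperf_delta ? (aperf_delta * 100) / mperf_delta : 0; /* 75 */
        unsigned int measured = max_freq * perf_percent / 100;   /* 1800000 kHz */

        printf("%u%% of max => %u kHz\n", perf_percent, measured);
        return 0;
    }
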
    35.1 --- a/xen/arch/x86/domain.c	Wed Jul 23 11:21:47 2008 +0900
    35.2 +++ b/xen/arch/x86/domain.c	Wed Jul 23 12:10:20 2008 +0900
    35.3 @@ -286,10 +286,6 @@ int vcpu_initialise(struct vcpu *v)
    35.4  
    35.5      v->arch.flags = TF_kernel_mode;
    35.6  
    35.7 -    /* Ensure that update_vcpu_system_time() fires at least once. */
    35.8 -    if ( !is_idle_domain(d) )
    35.9 -        vcpu_info(v, time).tsc_timestamp = ~0ull;
   35.10 -
   35.11  #if defined(__i386__)
   35.12      mapcache_vcpu_init(v);
   35.13  #endif
    36.1 --- a/xen/arch/x86/i8259.c	Wed Jul 23 11:21:47 2008 +0900
    36.2 +++ b/xen/arch/x86/i8259.c	Wed Jul 23 12:10:20 2008 +0900
    36.3 @@ -411,7 +411,7 @@ void __init init_IRQ(void)
    36.4      apic_intr_init();
    36.5  
    36.6      /* Set the clock to HZ Hz */
    36.7 -#define CLOCK_TICK_RATE 1193180 /* crystal freq (Hz) */
    36.8 +#define CLOCK_TICK_RATE 1193182 /* crystal freq (Hz) */
    36.9  #define LATCH (((CLOCK_TICK_RATE)+(HZ/2))/HZ)
   36.10      outb_p(0x34, PIT_MODE);        /* binary, mode 2, LSB/MSB, ch 0 */
   36.11      outb_p(LATCH & 0xff, PIT_CH0); /* LSB */
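
With the corrected 1193182 Hz crystal frequency, the PIT reload value works out as follows; HZ = 100 is only an assumption for the example:

    #include <stdio.h>

    #define CLOCK_TICK_RATE 1193182   /* PIT input clock, Hz */
    #define HZ 100                    /* assumed tick rate for the example */
    #define LATCH (((CLOCK_TICK_RATE) + (HZ / 2)) / HZ)

    int main(void)
    {
        /* (1193182 + 50) / 100 = 11932 PIT ticks per interrupt,
         * i.e. about 10.0 ms per tick at 1.193182 MHz. */
        printf("LATCH = %d\n", LATCH);
        return 0;
    }
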
    37.1 --- a/xen/arch/x86/setup.c	Wed Jul 23 11:21:47 2008 +0900
    37.2 +++ b/xen/arch/x86/setup.c	Wed Jul 23 12:10:20 2008 +0900
    37.3 @@ -521,14 +521,6 @@ void __init __start_xen(unsigned long mb
    37.4      if ( ((unsigned long)cpu0_stack & (STACK_SIZE-1)) != 0 )
    37.5          EARLY_FAIL("Misaligned CPU0 stack.\n");
    37.6  
    37.7 -    /*
    37.8 -     * Since there are some stubs getting built on the stacks which use
    37.9 -     * direct calls/jumps, the heap must be confined to the lower 2G so
   37.10 -     * that those branches can reach their targets.
   37.11 -     */
   37.12 -    if ( opt_xenheap_megabytes > 2048 )
   37.13 -        opt_xenheap_megabytes = 2048;
   37.14 -
   37.15      if ( e820_raw_nr != 0 )
   37.16      {
   37.17          memmap_type = "Xen-e820";
   37.18 @@ -600,6 +592,23 @@ void __init __start_xen(unsigned long mb
   37.19      /* Sanitise the raw E820 map to produce a final clean version. */
   37.20      max_page = init_e820(memmap_type, e820_raw, &e820_raw_nr);
   37.21  
   37.22 +#ifdef CONFIG_X86_64
   37.23 +    /*
   37.24 +     * On x86/64 we are able to account for the allocation bitmap
   37.25 +     * (allocated in common/page_alloc.c:init_boot_allocator()) stealing
   37.26 +     * from the Xen heap. Here we make the Xen heap appropriately larger.
   37.27 +     */
   37.28 +    opt_xenheap_megabytes += (max_page / 8) >> 20;
   37.29 +#endif
   37.30 +
   37.31 +    /*
   37.32 +     * Since there are some stubs getting built on the stacks which use
   37.33 +     * direct calls/jumps, the heap must be confined to the lower 2G so
   37.34 +     * that those branches can reach their targets.
   37.35 +     */
   37.36 +    if ( opt_xenheap_megabytes > 2048 )
   37.37 +        opt_xenheap_megabytes = 2048;
   37.38 +
   37.39      /* Create a temporary copy of the E820 map. */
   37.40      memcpy(&boot_e820, &e820, sizeof(e820));
   37.41  
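
The new x86-64 hunk grows the Xen heap by the size of the boot allocator's page bitmap (one bit per machine page) before the 2 GiB clamp is applied. A worked example, assuming a 64 GiB machine with 4 KiB pages:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long max_page = (64ULL << 30) / 4096;   /* 16777216 pages */
        unsigned long long extra_mb = (max_page / 8) >> 20;   /* bytes -> MiB   */

        /* 16777216 bits / 8 = 2097152 bytes, i.e. 2 MiB added to
         * opt_xenheap_megabytes. */
        printf("bitmap costs %llu MiB of xenheap\n", extra_mb);
        return 0;
    }
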
    38.1 --- a/xen/arch/x86/time.c	Wed Jul 23 11:21:47 2008 +0900
    38.2 +++ b/xen/arch/x86/time.c	Wed Jul 23 12:10:20 2008 +0900
    38.3 @@ -214,7 +214,7 @@ static struct irqaction irq0 = { timer_i
    38.4   * Return processor ticks per second / CALIBRATE_FRAC.
    38.5   */
    38.6  
    38.7 -#define CLOCK_TICK_RATE 1193180 /* system crystal frequency (Hz) */
    38.8 +#define CLOCK_TICK_RATE 1193182 /* system crystal frequency (Hz) */
    38.9  #define CALIBRATE_FRAC  20      /* calibrate over 50ms */
   38.10  #define CALIBRATE_LATCH ((CLOCK_TICK_RATE+(CALIBRATE_FRAC/2))/CALIBRATE_FRAC)
   38.11  
   38.12 @@ -484,40 +484,29 @@ static int init_pmtimer(struct platform_
   38.13   * PLATFORM TIMER 5: TSC
   38.14   */
   38.15  
   38.16 -#define platform_timer_is_tsc() (!strcmp(plt_src.name, "TSC"))
   38.17 -static u64 tsc_freq;
   38.18 -
   38.19 -static u64 read_tsc_count(void)
   38.20 -{
   38.21 -    u64 tsc;
   38.22 -    rdtscll(tsc);
   38.23 -    return tsc;
   38.24 -}
   38.25 +static const char plt_tsc_name[] = "TSC";
   38.26 +#define platform_timer_is_tsc() (plt_src.name == plt_tsc_name)
   38.27  
   38.28  static int init_tsctimer(struct platform_timesource *pts)
   38.29  {
   38.30 -    unsigned int cpu;
   38.31 +    if ( !tsc_invariant )
   38.32 +        return 0;
   38.33  
   38.34 -    /*
   38.35 -     * TODO: evaluate stability of TSC here, return 0 if not stable.
   38.36 -     * For now we assume all TSCs are synchronised and hence can all share
   38.37 -     * CPU 0's calibration values.
   38.38 -     */
   38.39 -    for_each_cpu ( cpu )
   38.40 -    {
   38.41 -        if ( cpu == 0 )
   38.42 -            continue;
   38.43 -        memcpy(&per_cpu(cpu_time, cpu),
   38.44 -               &per_cpu(cpu_time, 0),
   38.45 -               sizeof(struct cpu_time));
   38.46 -    }
   38.47 +    pts->name = (char *)plt_tsc_name;
   38.48 +    return 1;
   38.49 +}
   38.50  
   38.51 -    pts->name = "TSC";
   38.52 -    pts->frequency = tsc_freq;
   38.53 -    pts->read_counter = read_tsc_count;
   38.54 -    pts->counter_bits = 64;
   38.55 +static void make_tsctimer_record(void)
   38.56 +{
   38.57 +    struct cpu_time *t = &this_cpu(cpu_time);
   38.58 +    s_time_t now;
   38.59 +    u64 tsc;
   38.60  
   38.61 -    return 1;
   38.62 +    rdtscll(tsc);
   38.63 +    now = scale_delta(tsc, &t->tsc_scale);
   38.64 +
   38.65 +    t->local_tsc_stamp = tsc;
   38.66 +    t->stime_local_stamp = t->stime_master_stamp = now;
   38.67  }
   38.68  
   38.69  /************************************************************
   38.70 @@ -585,6 +574,12 @@ static void platform_time_calibration(vo
   38.71  
   38.72  static void resume_platform_timer(void)
   38.73  {
   38.74 +    if ( platform_timer_is_tsc() )
   38.75 +    {
   38.76 +        /* TODO: Save/restore TSC values. */
   38.77 +        return;
   38.78 +    }
   38.79 +
   38.80      /* No change in platform_stime across suspend/resume. */
   38.81      platform_timer_stamp = plt_stamp64;
   38.82      plt_stamp = plt_src.read_counter();
   38.83 @@ -620,6 +615,12 @@ static void init_platform_timer(void)
   38.84           !init_pmtimer(pts) )
   38.85          init_pit(pts);
   38.86  
   38.87 +    if ( platform_timer_is_tsc() )
   38.88 +    {
   38.89 +        printk("Platform timer is TSC\n");
   38.90 +        return;
   38.91 +    }
   38.92 +
   38.93      plt_mask = (u64)~0ull >> (64 - pts->counter_bits);
   38.94  
   38.95      set_time_scale(&plt_scale, pts->frequency);
   38.96 @@ -907,6 +908,14 @@ static void local_time_calibration(void 
   38.97      /* The overall calibration scale multiplier. */
   38.98      u32 calibration_mul_frac;
   38.99  
  38.100 +    if ( platform_timer_is_tsc() )
  38.101 +    {
  38.102 +        make_tsctimer_record(); 
  38.103 +        update_vcpu_system_time(current);
  38.104 +        set_timer(&t->calibration_timer, NOW() + MILLISECS(10*1000));
  38.105 +        return;
  38.106 +    }
  38.107 +
  38.108      prev_tsc          = t->local_tsc_stamp;
  38.109      prev_local_stime  = t->stime_local_stamp;
  38.110      prev_master_stime = t->stime_master_stamp;
  38.111 @@ -1025,16 +1034,20 @@ void init_percpu_time(void)
  38.112      s_time_t now;
  38.113  
  38.114      if ( platform_timer_is_tsc() )
  38.115 -        return;
  38.116 +    {
  38.117 +        make_tsctimer_record();
  38.118 +        goto out;
  38.119 +    }
  38.120  
  38.121      local_irq_save(flags);
  38.122      rdtscll(t->local_tsc_stamp);
  38.123 -    now = read_platform_stime();
  38.124 +    now = !plt_src.read_counter ? 0 : read_platform_stime();
  38.125      local_irq_restore(flags);
  38.126  
  38.127      t->stime_master_stamp = now;
  38.128      t->stime_local_stamp  = now;
  38.129  
  38.130 + out:
  38.131      init_timer(&t->calibration_timer, local_time_calibration,
  38.132                 NULL, smp_processor_id());
  38.133      set_timer(&t->calibration_timer, NOW() + EPOCH);
  38.134 @@ -1043,19 +1056,19 @@ void init_percpu_time(void)
  38.135  /* Late init function (after all CPUs are booted). */
  38.136  int __init init_xen_time(void)
  38.137  {
  38.138 -    wc_sec = get_cmos_time();
  38.139 +    local_irq_disable();
  38.140  
  38.141 -    local_irq_disable();
   38.142 +    /* Check whether the TSC is invariant across deep C states
   38.143 +       (a feature introduced with Nehalem). */
  38.144 +    if ( cpuid_edx(0x80000007) & (1u<<8) )
  38.145 +        tsc_invariant = 1;
  38.146 +
  38.147 +    init_percpu_time();
  38.148  
  38.149      stime_platform_stamp = 0;
  38.150      init_platform_timer();
  38.151  
  38.152 -    init_percpu_time();
  38.153 -
  38.154 -    /* check if TSC is invariant during deep C state
  38.155 -       this is a new feature introduced by Nehalem*/
  38.156 -    if ( cpuid_edx(0x80000007) & (1U<<8) )
  38.157 -        tsc_invariant = 1;
  38.158 +    do_settime(get_cmos_time(), 0, NOW());
  38.159  
  38.160      local_irq_enable();
  38.161  
  38.162 @@ -1068,7 +1081,6 @@ void __init early_time_init(void)
  38.163  {
  38.164      u64 tmp = init_pit_and_calibrate_tsc();
  38.165  
  38.166 -    tsc_freq = tmp;
  38.167      set_time_scale(&this_cpu(cpu_time).tsc_scale, tmp);
  38.168  
  38.169      do_div(tmp, 1000);
  38.170 @@ -1170,9 +1182,9 @@ int time_resume(void)
  38.171  
  38.172      resume_platform_timer();
  38.173  
  38.174 -    do_settime(get_cmos_time() + cmos_utc_offset, 0, read_platform_stime());
  38.175 +    init_percpu_time();
  38.176  
  38.177 -    init_percpu_time();
  38.178 +    do_settime(get_cmos_time() + cmos_utc_offset, 0, NOW());
  38.179  
  38.180      if ( !is_idle_vcpu(current) )
  38.181          update_vcpu_system_time(current);
    39.1 --- a/xen/common/keyhandler.c	Wed Jul 23 11:21:47 2008 +0900
    39.2 +++ b/xen/common/keyhandler.c	Wed Jul 23 12:10:20 2008 +0900
    39.3 @@ -240,10 +240,12 @@ static s_time_t read_clocks_time[NR_CPUS
    39.4  static void read_clocks_slave(void *unused)
    39.5  {
    39.6      unsigned int cpu = smp_processor_id();
    39.7 +    local_irq_disable();
    39.8      while ( !cpu_isset(cpu, read_clocks_cpumask) )
    39.9          cpu_relax();
   39.10      read_clocks_time[cpu] = NOW();
   39.11      cpu_clear(cpu, read_clocks_cpumask);
   39.12 +    local_irq_enable();
   39.13  }
   39.14  
   39.15  static void read_clocks(unsigned char key)
    40.1 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Wed Jul 23 11:21:47 2008 +0900
    40.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Wed Jul 23 12:10:20 2008 +0900
    40.3 @@ -620,11 +620,49 @@ static int amd_iommu_return_device(
    40.4  
    40.5  static int amd_iommu_add_device(struct pci_dev *pdev)
    40.6  {
    40.7 +    struct amd_iommu *iommu;
    40.8 +    u16 bdf;
    40.9 +    if ( !pdev->domain )
   40.10 +        return -EINVAL;
   40.11 +
   40.12 +    bdf = (pdev->bus << 8) | pdev->devfn;
   40.13 +    iommu = (bdf < ivrs_bdf_entries) ?
    40.14 +        find_iommu_for_device(pdev->bus, pdev->devfn) : NULL;
   40.15 +
   40.16 +    if ( !iommu )
   40.17 +    {
    40.18 +        amd_iov_error("Failed to find iommu."
   40.19 +            " %x:%x.%x cannot be assigned to domain %d\n", 
   40.20 +            pdev->bus, PCI_SLOT(pdev->devfn),
   40.21 +            PCI_FUNC(pdev->devfn), pdev->domain->domain_id);
   40.22 +        return -ENODEV;
   40.23 +    }
   40.24 +
   40.25 +    amd_iommu_setup_domain_device(pdev->domain, iommu, bdf);
   40.26      return 0;
   40.27  }
   40.28  
   40.29  static int amd_iommu_remove_device(struct pci_dev *pdev)
   40.30  {
   40.31 +    struct amd_iommu *iommu;
   40.32 +    u16 bdf;
   40.33 +    if ( !pdev->domain )
   40.34 +        return -EINVAL;
   40.35 +
   40.36 +    bdf = (pdev->bus << 8) | pdev->devfn;
   40.37 +    iommu = (bdf < ivrs_bdf_entries) ?
    40.38 +        find_iommu_for_device(pdev->bus, pdev->devfn) : NULL;
   40.39 +
   40.40 +    if ( !iommu )
   40.41 +    {
    40.42 +        amd_iov_error("Failed to find iommu."
   40.43 +            " %x:%x.%x cannot be removed from domain %d\n", 
   40.44 +            pdev->bus, PCI_SLOT(pdev->devfn),
   40.45 +            PCI_FUNC(pdev->devfn), pdev->domain->domain_id);
   40.46 +        return -ENODEV;
   40.47 +    }
   40.48 +
   40.49 +    amd_iommu_disable_domain_device(pdev->domain, iommu, bdf);
   40.50      return 0;
   40.51  }
   40.52  
    41.1 --- a/xen/drivers/passthrough/vtd/dmar.h	Wed Jul 23 11:21:47 2008 +0900
    41.2 +++ b/xen/drivers/passthrough/vtd/dmar.h	Wed Jul 23 12:10:20 2008 +0900
    41.3 @@ -76,7 +76,7 @@ struct acpi_atsr_unit {
    41.4  #define for_each_rmrr_device(rmrr, bdf, idx)            \
    41.5      list_for_each_entry(rmrr, &acpi_rmrr_units, list)   \
    41.6          /* assume there never is a bdf == 0 */          \
    41.7 -        for (idx = 0; (bdf = rmrr->scope.devices[i]) && \
    41.8 +        for (idx = 0; (bdf = rmrr->scope.devices[idx]) && \
    41.9                   idx < rmrr->scope.devices_cnt; idx++)
   41.10  
   41.11  struct acpi_drhd_unit * acpi_find_matched_drhd_unit(u8 bus, u8 devfn);
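
The dmar.h fix indexes the scope's device list with the loop variable idx rather than an unrelated i. A small standalone sketch of the corrected iterator, with the structures simplified for illustration (the real list is zero-terminated, as the "assume there never is a bdf == 0" comment notes):

    #include <stdio.h>

    /* Simplified stand-in for the VT-d scope structure. */
    struct scope { unsigned short devices[4]; int devices_cnt; };

    #define for_each_scope_device(s, bdf, idx)                    \
        for ( (idx) = 0; ((bdf) = (s)->devices[(idx)]) &&         \
                         (idx) < (s)->devices_cnt; (idx)++ )

    int main(void)
    {
        struct scope s = { { 0x0108, 0x0210, 0, 0 }, 2 };
        unsigned short bdf;
        int idx;

        for_each_scope_device(&s, bdf, idx)
            printf("device %d: bdf %#06x\n", idx, bdf);  /* uses idx, not i */
        return 0;
    }
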
    42.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Wed Jul 23 11:21:47 2008 +0900
    42.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Wed Jul 23 12:10:20 2008 +0900
    42.3 @@ -1294,11 +1294,18 @@ static int domain_context_mapping(struct
    42.4      return ret;
    42.5  }
    42.6  
    42.7 -static int domain_context_unmap_one(struct iommu *iommu, u8 bus, u8 devfn)
    42.8 +static int domain_context_unmap_one(
    42.9 +    struct domain *domain,
   42.10 +    struct iommu *iommu,
   42.11 +    u8 bus, u8 devfn)
   42.12  {
   42.13      struct context_entry *context, *context_entries;
   42.14      unsigned long flags;
   42.15      u64 maddr;
   42.16 +    struct acpi_rmrr_unit *rmrr;
   42.17 +    u16 bdf;
   42.18 +    int i;
   42.19 +    unsigned int is_rmrr_device = 0;
   42.20  
   42.21      maddr = bus_to_context_maddr(iommu, bus);
   42.22      context_entries = (struct context_entry *)map_vtd_domain_page(maddr);
   42.23 @@ -1311,18 +1318,32 @@ static int domain_context_unmap_one(stru
   42.24      }
   42.25  
   42.26      spin_lock_irqsave(&iommu->lock, flags);
   42.27 -    context_clear_present(*context);
   42.28 -    context_clear_entry(*context);
   42.29 -    iommu_flush_cache_entry(context);
   42.30 -    iommu_flush_context_global(iommu, 0);
   42.31 -    iommu_flush_iotlb_global(iommu, 0);
   42.32 +    if ( domain->domain_id == 0 )
   42.33 +    {
   42.34 +        for_each_rmrr_device ( rmrr, bdf, i )
   42.35 +        {
   42.36 +            if ( PCI_BUS(bdf) == bus && PCI_DEVFN2(bdf) == devfn )
   42.37 +            {
   42.38 +                is_rmrr_device = 1;
   42.39 +                break;
   42.40 +            }
   42.41 +        }
   42.42 +    }
   42.43 +    if ( !is_rmrr_device )
   42.44 +    {
   42.45 +        context_clear_present(*context);
   42.46 +        context_clear_entry(*context);
   42.47 +        iommu_flush_cache_entry(context);
   42.48 +        iommu_flush_context_domain(iommu, domain_iommu_domid(domain), 0);
   42.49 +        iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
   42.50 +    }
   42.51      unmap_vtd_domain_page(context_entries);
   42.52      spin_unlock_irqrestore(&iommu->lock, flags);
   42.53  
   42.54      return 0;
   42.55  }
   42.56  
   42.57 -static int domain_context_unmap(u8 bus, u8 devfn)
   42.58 +static int domain_context_unmap(struct domain *domain, u8 bus, u8 devfn)
   42.59  {
   42.60      struct acpi_drhd_unit *drhd;
   42.61      u16 sec_bus, sub_bus;
   42.62 @@ -1345,18 +1366,18 @@ static int domain_context_unmap(u8 bus, 
   42.63                                   PCI_SUBORDINATE_BUS);
   42.64          /*dmar_scope_remove_buses(&drhd->scope, sec_bus, sub_bus);*/
   42.65          if ( DEV_TYPE_PCI_BRIDGE )
   42.66 -            ret = domain_context_unmap_one(drhd->iommu, bus, devfn);
   42.67 +            ret = domain_context_unmap_one(domain, drhd->iommu, bus, devfn);
   42.68          break;
   42.69  
   42.70      case DEV_TYPE_PCIe_ENDPOINT:
   42.71 -        ret = domain_context_unmap_one(drhd->iommu, bus, devfn);
   42.72 +        ret = domain_context_unmap_one(domain, drhd->iommu, bus, devfn);
   42.73          break;
   42.74  
   42.75      case DEV_TYPE_PCI:
   42.76          if ( find_pcie_endpoint(&bus, &devfn, &secbus) )
   42.77 -            ret = domain_context_unmap_one(drhd->iommu, bus, devfn);
   42.78 +            ret = domain_context_unmap_one(domain, drhd->iommu, bus, devfn);
   42.79          if ( bus != secbus )
   42.80 -            domain_context_unmap_one(drhd->iommu, secbus, 0);
   42.81 +            domain_context_unmap_one(domain, drhd->iommu, secbus, 0);
   42.82          break;
   42.83  
   42.84      default:
   42.85 @@ -1386,7 +1407,7 @@ static int reassign_device_ownership(
   42.86  
   42.87      drhd = acpi_find_matched_drhd_unit(bus, devfn);
   42.88      pdev_iommu = drhd->iommu;
   42.89 -    domain_context_unmap(bus, devfn);
   42.90 +    domain_context_unmap(source, bus, devfn);
   42.91  
   42.92      write_lock(&pcidevs_lock);
   42.93      list_move(&pdev->domain_list, &target->arch.pdev_list);
   42.94 @@ -1584,7 +1605,9 @@ static int intel_iommu_add_device(struct
   42.95  
   42.96  static int intel_iommu_remove_device(struct pci_dev *pdev)
   42.97  {
   42.98 -    return domain_context_unmap(pdev->bus, pdev->devfn);
   42.99 +    if ( !pdev->domain )
  42.100 +        return -EINVAL;
  42.101 +    return domain_context_unmap(pdev->domain, pdev->bus, pdev->devfn);
  42.102  }
  42.103  
  42.104  static void setup_dom0_devices(struct domain *d)
    43.1 --- a/xen/include/asm-x86/hvm/vpt.h	Wed Jul 23 11:21:47 2008 +0900
    43.2 +++ b/xen/include/asm-x86/hvm/vpt.h	Wed Jul 23 12:10:20 2008 +0900
    43.3 @@ -95,7 +95,7 @@ struct periodic_time {
    43.4  };
    43.5  
    43.6  
    43.7 -#define PIT_FREQ 1193181
    43.8 +#define PIT_FREQ 1193182
    43.9  #define PIT_BASE 0x40
   43.10  
   43.11  typedef struct PITState {
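
PIT_FREQ now matches the 1193182 Hz input clock used by the hypervisor's own PIT code. A worked conversion from a reload count to the emulated timer period; the count of 65536 is simply the longest possible 16-bit program:

    #include <stdint.h>
    #include <stdio.h>

    #define PIT_FREQ 1193182   /* Hz */

    int main(void)
    {
        uint32_t count = 65536;   /* full 16-bit reload */
        uint64_t period_ns = (uint64_t)count * 1000000000ULL / PIT_FREQ;

        /* 65536 / 1193182 Hz ~= 54.9 ms between interrupts. */
        printf("period = %llu ns\n", (unsigned long long)period_ns);
        return 0;
    }
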
    44.1 --- a/xen/include/public/xsm/flask_op.h	Wed Jul 23 11:21:47 2008 +0900
    44.2 +++ b/xen/include/public/xsm/flask_op.h	Wed Jul 23 12:10:20 2008 +0900
    44.3 @@ -32,10 +32,12 @@
    44.4  #define FLASK_AVC_CACHESTATS    19
    44.5  #define FLASK_MEMBER            20
    44.6  
    44.7 +#define FLASK_LAST              FLASK_MEMBER
    44.8 +
    44.9  typedef struct flask_op {
   44.10 -    int   cmd;
   44.11 -    int   size;
   44.12 -    char *buf;
   44.13 +    uint32_t  cmd;
   44.14 +    uint32_t  size;
   44.15 +    char      *buf;
   44.16  } flask_op_t;
   44.17  
   44.18  DEFINE_XEN_GUEST_HANDLE(flask_op_t);
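
flask_op_t now uses fixed-width fields for cmd and size. Below is a hedged sketch of how a caller might populate it for a copy-out-only command such as FLASK_GETENFORCE; the structure is restated locally, the command value of 2 is an assumption for the example, and the hypercall itself would be issued through the toolstack (not shown):

    #include <stdint.h>
    #include <string.h>

    typedef struct flask_op {       /* mirrors the fixed-width layout */
        uint32_t  cmd;
        uint32_t  size;
        char     *buf;
    } flask_op_t;

    #define FLASK_GETENFORCE 2      /* assumed value, for illustration only */

    int main(void)
    {
        char buf[16];
        flask_op_t op;

        memset(buf, 0, sizeof(buf));
        op.cmd  = FLASK_GETENFORCE; /* copy-out only, per FLASK_COPY_OUT below */
        op.size = sizeof(buf);
        op.buf  = buf;
        /* A real caller would now hand &op to the flask hypercall. */
        return 0;
    }
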
    45.1 --- a/xen/include/xen/pci.h	Wed Jul 23 11:21:47 2008 +0900
    45.2 +++ b/xen/include/xen/pci.h	Wed Jul 23 12:10:20 2008 +0900
    45.3 @@ -24,10 +24,10 @@
    45.4  #define PCI_BUS(bdf)    (((bdf) >> 8) & 0xff)
    45.5  #define PCI_SLOT(bdf)   (((bdf) >> 3) & 0x1f)
    45.6  #define PCI_FUNC(bdf)   ((bdf) & 0x07)
    45.7 -#define PCI_DEVFN(d,f)  (((d & 0x1f) << 3) | (f & 0x07))
    45.8 +#define PCI_DEVFN(d,f)  ((((d) & 0x1f) << 3) | ((f) & 0x07))
    45.9  #define PCI_DEVFN2(bdf) ((bdf) & 0xff)
   45.10 -#define PCI_BDF(b,d,f)  (((b * 0xff) << 8) | PCI_DEVFN(d,f))
   45.11 -#define PCI_BDF2(b,df)  (((b & 0xff) << 8) | (df & 0xff))
   45.12 +#define PCI_BDF(b,d,f)  ((((b) & 0xff) << 8) | PCI_DEVFN(d,f))
   45.13 +#define PCI_BDF2(b,df)  ((((b) & 0xff) << 8) | ((df) & 0xff))
   45.14  
   45.15  struct pci_dev {
   45.16      struct list_head alldevs_list;
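
The pci.h change parenthesizes the macro arguments and replaces the bogus b * 0xff with a mask. A worked example for bus 1, slot 2, function 3, comparing the old and new expansions:

    #include <stdio.h>

    #define PCI_DEVFN(d, f)    ((((d) & 0x1f) << 3) | ((f) & 0x07))
    #define PCI_BDF_OLD(b,d,f) (((b * 0xff) << 8) | PCI_DEVFN(d, f))   /* old, buggy */
    #define PCI_BDF_NEW(b,d,f) ((((b) & 0xff) << 8) | PCI_DEVFN(d, f)) /* fixed */

    int main(void)
    {
        /* devfn = (2 << 3) | 3 = 0x13 */
        printf("old: %#x\n", PCI_BDF_OLD(1, 2, 3));   /* 0xff13: wrong bus bits */
        printf("new: %#x\n", PCI_BDF_NEW(1, 2, 3));   /* 0x113: expected value  */
        return 0;
    }
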
    46.1 --- a/xen/include/xsm/xsm.h	Wed Jul 23 11:21:47 2008 +0900
    46.2 +++ b/xen/include/xsm/xsm.h	Wed Jul 23 12:10:20 2008 +0900
    46.3 @@ -108,7 +108,6 @@ struct xsm_operations {
    46.4      int (*schedop_shutdown) (struct domain *d1, struct domain *d2);
    46.5  
    46.6      long (*__do_xsm_op) (XEN_GUEST_HANDLE(xsm_op_t) op);
    46.7 -    void (*complete_init) (struct domain *d);
    46.8  
    46.9  #ifdef CONFIG_X86
   46.10      int (*shadow_control) (struct domain *d, uint32_t op);
   46.11 @@ -392,11 +391,6 @@ static inline long __do_xsm_op (XEN_GUES
   46.12      return xsm_call(__do_xsm_op(op));
   46.13  }
   46.14  
   46.15 -static inline void xsm_complete_init (struct domain *d)
   46.16 -{
   46.17 -    xsm_call(complete_init(d));
   46.18 -}
   46.19 -
   46.20  #ifdef XSM_ENABLE
   46.21  extern int xsm_init(unsigned int *initrdidx, const multiboot_info_t *mbi,
   46.22                                            unsigned long initial_images_start);
    47.1 --- a/xen/xsm/dummy.c	Wed Jul 23 11:21:47 2008 +0900
    47.2 +++ b/xen/xsm/dummy.c	Wed Jul 23 12:10:20 2008 +0900
    47.3 @@ -254,11 +254,6 @@ static void dummy_free_security_evtchn (
    47.4      return;
    47.5  }
    47.6  
    47.7 -static void dummy_complete_init (struct domain *d)
    47.8 -{
    47.9 -    return;
   47.10 -}
   47.11 -
   47.12  static long dummy___do_xsm_op(XEN_GUEST_HANDLE(xsm_op_t) op)
   47.13  {
   47.14      return -ENOSYS;
   47.15 @@ -462,7 +457,6 @@ void xsm_fixup_ops (struct xsm_operation
   47.16      set_to_dummy_if_null(ops, schedop_shutdown);
   47.17  
   47.18      set_to_dummy_if_null(ops, __do_xsm_op);
   47.19 -    set_to_dummy_if_null(ops, complete_init);
   47.20  
   47.21  #ifdef CONFIG_X86
   47.22      set_to_dummy_if_null(ops, shadow_control);
    48.1 --- a/xen/xsm/flask/avc.c	Wed Jul 23 11:21:47 2008 +0900
    48.2 +++ b/xen/xsm/flask/avc.c	Wed Jul 23 12:10:20 2008 +0900
    48.3 @@ -250,7 +250,7 @@ void __init avc_init(void)
    48.4      printk("AVC INITIALIZED\n");
    48.5  }
    48.6  
    48.7 -int avc_get_hash_stats(char *page)
    48.8 +int avc_get_hash_stats(char *buf, uint32_t size)
    48.9  {
   48.10      int i, chain_len, max_chain_len, slots_used;
   48.11      struct avc_node *node;
   48.12 @@ -274,7 +274,7 @@ int avc_get_hash_stats(char *page)
   48.13  
   48.14      rcu_read_unlock();
   48.15      
   48.16 -    return snprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
   48.17 +    return snprintf(buf, size, "entries: %d\nbuckets used: %d/%d\n"
   48.18                                  "longest chain: %d\n",
   48.19                                  atomic_read(&avc_cache.active_nodes),
   48.20                                  slots_used, AVC_CACHE_SLOTS, max_chain_len);
    49.1 --- a/xen/xsm/flask/flask_op.c	Wed Jul 23 11:21:47 2008 +0900
    49.2 +++ b/xen/xsm/flask/flask_op.c	Wed Jul 23 12:10:20 2008 +0900
    49.3 @@ -29,6 +29,43 @@ int flask_enabled = 1;
    49.4  integer_param("flask_enabled", flask_enabled);
    49.5  #endif
    49.6  
    49.7 +#define MAX_POLICY_SIZE 0x4000000
    49.8 +#define FLASK_COPY_IN \
    49.9 +    ( \
   49.10 +        1UL<<FLASK_LOAD | \
   49.11 +        1UL<<FLASK_SETENFORCE | \
   49.12 +        1UL<<FLASK_CONTEXT_TO_SID | \
   49.13 +        1UL<<FLASK_SID_TO_CONTEXT | \
   49.14 +        1UL<<FLASK_ACCESS | \
   49.15 +        1UL<<FLASK_CREATE | \
   49.16 +        1UL<<FLASK_RELABEL | \
   49.17 +        1UL<<FLASK_USER | \
   49.18 +        1UL<<FLASK_GETBOOL | \
   49.19 +        1UL<<FLASK_SETBOOL | \
   49.20 +        1UL<<FLASK_COMMITBOOLS | \
   49.21 +        1UL<<FLASK_DISABLE | \
   49.22 +        1UL<<FLASK_SETAVC_THRESHOLD | \
   49.23 +        1UL<<FLASK_MEMBER \
   49.24 +    )
   49.25 +
   49.26 +#define FLASK_COPY_OUT \
   49.27 +    ( \
   49.28 +        1UL<<FLASK_GETENFORCE | \
   49.29 +        1UL<<FLASK_CONTEXT_TO_SID | \
   49.30 +        1UL<<FLASK_SID_TO_CONTEXT | \
   49.31 +        1UL<<FLASK_ACCESS | \
   49.32 +        1UL<<FLASK_CREATE | \
   49.33 +        1UL<<FLASK_RELABEL | \
   49.34 +        1UL<<FLASK_USER | \
   49.35 +        1UL<<FLASK_POLICYVERS | \
   49.36 +        1UL<<FLASK_GETBOOL | \
   49.37 +        1UL<<FLASK_MLS | \
   49.38 +        1UL<<FLASK_GETAVC_THRESHOLD | \
   49.39 +        1UL<<FLASK_AVC_HASHSTATS | \
   49.40 +        1UL<<FLASK_AVC_CACHESTATS | \
   49.41 +        1UL<<FLASK_MEMBER \
   49.42 +    )
   49.43 +
   49.44  static DEFINE_SPINLOCK(sel_sem);
   49.45  
   49.46  /* global data for booleans */
   49.47 @@ -51,7 +88,7 @@ static int domain_has_security(struct do
   49.48                                                                  perms, NULL);
   49.49  }
   49.50  
   49.51 -static int flask_security_user(char *buf, int size)
   49.52 +static int flask_security_user(char *buf, uint32_t size)
   49.53  {
   49.54      char *page = NULL;
   49.55      char *con, *user, *ptr;
   49.56 @@ -82,12 +119,8 @@ static int flask_security_user(char *buf
   49.57          goto out2;
   49.58      memset(page, 0, PAGE_SIZE);
   49.59  
   49.60 -    length = -EFAULT;
   49.61 -    if ( copy_from_user(page, buf, size) )
   49.62 -        goto out2;
   49.63 -        
   49.64      length = -EINVAL;
   49.65 -    if ( sscanf(page, "%s %s", con, user) != 2 )
   49.66 +    if ( sscanf(buf, "%s %s", con, user) != 2 )
   49.67          goto out2;
   49.68  
   49.69      length = security_context_to_sid(con, strlen(con)+1, &sid);
   49.70 @@ -98,7 +131,6 @@ static int flask_security_user(char *buf
   49.71      if ( length < 0 )
   49.72          goto out2;
   49.73      
   49.74 -    memset(page, 0, PAGE_SIZE);
   49.75      length = snprintf(page, PAGE_SIZE, "%u", nsids) + 1;
   49.76      ptr = page + length;
   49.77      for ( i = 0; i < nsids; i++ )
   49.78 @@ -121,8 +153,16 @@ static int flask_security_user(char *buf
   49.79          length += len;
   49.80      }
   49.81      
   49.82 -    if ( copy_to_user(buf, page, length) )
   49.83 -        length = -EFAULT;
   49.84 +    if ( length > size )
   49.85 +    {
   49.86 +        printk( "%s:  context size (%u) exceeds payload "
   49.87 +                "max\n", __FUNCTION__, length);
   49.88 +        length = -ERANGE;
   49.89 +        goto out3;
   49.90 +    }
   49.91 +
   49.92 +    memset(buf, 0, size);
   49.93 +    memcpy(buf, page, length);
   49.94          
   49.95  out3:
   49.96      xfree(sids);
   49.97 @@ -135,7 +175,7 @@ out:
   49.98      return length;
   49.99  }
  49.100  
  49.101 -static int flask_security_relabel(char *buf, int size)
  49.102 +static int flask_security_relabel(char *buf, uint32_t size)
  49.103  {
  49.104      char *scon, *tcon;
  49.105      u32 ssid, tsid, newsid;
  49.106 @@ -178,17 +218,18 @@ static int flask_security_relabel(char *
  49.107      if ( length < 0 )
  49.108          goto out2;
  49.109              
  49.110 -    if ( len > PAGE_SIZE )
  49.111 +    if ( len > size )
  49.112      {
  49.113 +        printk( "%s:  context size (%u) exceeds payload "
  49.114 +                "max\n", __FUNCTION__, len);
  49.115          length = -ERANGE;
  49.116          goto out3;
  49.117      }
  49.118 -        
  49.119 -    if ( copy_to_user(buf, newcon, len) )
  49.120 -        len = -EFAULT;
  49.121  
  49.122 +    memset(buf, 0, size);
  49.123 +    memcpy(buf, newcon, len);
  49.124      length = len;
  49.125 -        
  49.126 +
  49.127  out3:
  49.128      xfree(newcon);
  49.129  out2:
  49.130 @@ -198,7 +239,7 @@ out:
  49.131      return length;
  49.132  }
  49.133  
  49.134 -static int flask_security_create(char *buf, int size)
  49.135 +static int flask_security_create(char *buf, uint32_t size)
  49.136  {
  49.137      char *scon, *tcon;
  49.138      u32 ssid, tsid, newsid;
  49.139 @@ -242,7 +283,7 @@ static int flask_security_create(char *b
  49.140      if ( length < 0 )    
  49.141          goto out2;
  49.142  
  49.143 -    if ( len > PAGE_SIZE )
  49.144 +    if ( len > size )
  49.145      {
  49.146          printk( "%s:  context size (%u) exceeds payload "
  49.147                  "max\n", __FUNCTION__, len);
  49.148 @@ -250,9 +291,8 @@ static int flask_security_create(char *b
  49.149          goto out3;
  49.150      }
  49.151  
  49.152 -    if ( copy_to_user(buf, newcon, len) )
  49.153 -        len = -EFAULT;
  49.154 -
  49.155 +    memset(buf, 0, size);
  49.156 +    memcpy(buf, newcon, len);
  49.157      length = len;
  49.158          
  49.159  out3:
  49.160 @@ -264,9 +304,8 @@ out:
  49.161      return length;
  49.162  }
  49.163  
  49.164 -static int flask_security_access(char *buf, int size)
  49.165 +static int flask_security_access(char *buf, uint32_t size)
  49.166  {
  49.167 -    char *page = NULL;
  49.168      char *scon, *tcon;
  49.169      u32 ssid, tsid;
  49.170      u16 tclass;
  49.171 @@ -305,23 +344,12 @@ static int flask_security_access(char *b
  49.172      if ( length < 0 )
  49.173          goto out2;
  49.174  
  49.175 -    page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.176 -    if ( !page )
  49.177 -    {
  49.178 -        length = -ENOMEM;
  49.179 -        goto out2;
  49.180 -    }
  49.181 -
  49.182 -    memset(page, 0, PAGE_SIZE);
  49.183 -
  49.184 -    length = snprintf(page, PAGE_SIZE, "%x %x %x %x %u", 
  49.185 +    memset(buf, 0, size);
  49.186 +    length = snprintf(buf, size, "%x %x %x %x %u", 
  49.187                                          avd.allowed, avd.decided,
  49.188                                          avd.auditallow, avd.auditdeny, 
  49.189                                          avd.seqno);
  49.190                  
  49.191 -    if ( copy_to_user(buf, page, length) )
  49.192 -        length = -EFAULT;
  49.193 -        
  49.194  out2:
  49.195      xfree(tcon);
  49.196  out:
  49.197 @@ -329,7 +357,7 @@ out:
  49.198      return length;
  49.199  }
  49.200  
  49.201 -static int flask_security_member(char *buf, int size)
  49.202 +static int flask_security_member(char *buf, uint32_t size)
  49.203  {
  49.204      char *scon, *tcon;
  49.205      u32 ssid, tsid, newsid;
  49.206 @@ -373,7 +401,7 @@ static int flask_security_member(char *b
  49.207      if ( length < 0 )
  49.208          goto out2;
  49.209  
  49.210 -    if ( len > PAGE_SIZE )
  49.211 +    if ( len > size )
  49.212      {
  49.213          printk("%s:  context size (%u) exceeds payload "
  49.214                  "max\n", __FUNCTION__, len);
  49.215 @@ -381,9 +409,8 @@ static int flask_security_member(char *b
  49.216          goto out3;
  49.217      }
  49.218  
  49.219 -    if ( copy_to_user(buf, newcon, len) )
  49.220 -        len = -EFAULT;
  49.221 -
  49.222 +    memset(buf, 0, size);
  49.223 +    memcpy(buf, newcon, len);
  49.224      length = len;
  49.225  
  49.226  out3:
  49.227 @@ -395,26 +422,13 @@ out:
  49.228      return length;
  49.229  }
  49.230  
  49.231 -static int flask_security_setenforce(char *buf, int count)
  49.232 +static int flask_security_setenforce(char *buf, uint32_t count)
  49.233  {
  49.234 -    char *page = NULL;
  49.235      int length;
  49.236      int new_value;
  49.237  
  49.238 -    if ( count < 0 || count >= PAGE_SIZE )
  49.239 -        return -ENOMEM;
  49.240 -
  49.241 -    page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.242 -    if ( !page )
  49.243 -        return -ENOMEM;
  49.244 -    memset(page, 0, PAGE_SIZE);
  49.245 -    length = -EFAULT;
  49.246 -    if ( copy_from_user(page, buf, count) )
  49.247 -        goto out;
  49.248 -
  49.249 -    length = -EINVAL;
  49.250 -    if ( sscanf(page, "%d", &new_value) != 1 )
  49.251 -        goto out;
  49.252 +    if ( sscanf(buf, "%d", &new_value) != 1 )
  49.253 +        return -EINVAL;
  49.254  
  49.255      if ( new_value != flask_enforcing )
  49.256      {
  49.257 @@ -428,13 +442,11 @@ static int flask_security_setenforce(cha
  49.258      length = count;
  49.259  
  49.260  out:
  49.261 -    xfree(page);
  49.262      return length;
  49.263  }
  49.264  
  49.265 -static int flask_security_context(char *buf, int count)
  49.266 +static int flask_security_context(char *buf, uint32_t count)
  49.267  {
  49.268 -    char *page = NULL;
  49.269      u32 sid;
  49.270      int length;
  49.271  
  49.272 @@ -442,35 +454,19 @@ static int flask_security_context(char *
  49.273      if ( length )
  49.274          goto out;
  49.275  
  49.276 -    if ( count < 0 || count >= PAGE_SIZE )
  49.277 -        return -ENOMEM;
  49.278 -
  49.279 -    page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.280 -    if ( !page )
  49.281 -        return -ENOMEM;
  49.282 -    memset(page, 0, PAGE_SIZE);
  49.283 -    length = -EFAULT;
  49.284 -    if ( copy_from_user(page, buf, count) )
  49.285 -        goto out;
  49.286 -
  49.287 -    length = security_context_to_sid(page, count, &sid);
  49.288 +    length = security_context_to_sid(buf, count, &sid);
  49.289      if ( length < 0 )
  49.290          goto out;
  49.291  
  49.292 -    memset(page, 0, PAGE_SIZE);
  49.293 -    length = snprintf(page, PAGE_SIZE, "%u", sid);
  49.294 -
  49.295 -    if ( copy_to_user(buf, page, count) )
  49.296 -        length = -EFAULT;
  49.297 +    memset(buf, 0, count);
  49.298 +    length = snprintf(buf, count, "%u", sid);
  49.299  
  49.300  out:
  49.301 -    xfree(page);
  49.302      return length;
  49.303  }
  49.304  
  49.305 -static int flask_security_sid(char *buf, int count)
  49.306 +static int flask_security_sid(char *buf, uint32_t count)
  49.307  {
  49.308 -    char *page = NULL;
  49.309      char *context;
  49.310      u32 sid;
  49.311      u32 len;
  49.312 @@ -480,31 +476,20 @@ static int flask_security_sid(char *buf,
  49.313      if ( length )
  49.314          goto out;
  49.315  
  49.316 -    if ( count < 0 || count >= PAGE_SIZE )
  49.317 -        return -ENOMEM;
  49.318 -
  49.319 -    page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.320 -    if ( !page )
  49.321 -        return -ENOMEM;
  49.322 -    memset(page, 0, PAGE_SIZE);
  49.323 -    length = -EFAULT;
  49.324 -    if ( copy_from_user(page, buf, count) )
  49.325 -        goto out;
  49.326 -
  49.327 -    if ( sscanf(page, "%u", &sid) != 1 )
  49.328 +    if ( sscanf(buf, "%u", &sid) != 1 )
  49.329          goto out;
  49.330  
  49.331      length = security_sid_to_context(sid, &context, &len);
  49.332      if ( length < 0 )
  49.333          goto out;
  49.334  
  49.335 -    if ( copy_to_user(buf, context, len) )
  49.336 -        length = -EFAULT;
  49.337 -    
  49.338 +    memset(buf, 0, count);
  49.339 +    memcpy(buf, context, len);
  49.340 +    length = len;
  49.341 +
  49.342      xfree(context);
  49.343  
  49.344  out:
  49.345 -    xfree(page);
  49.346      return length;
  49.347  }
  49.348  
  49.349 @@ -534,24 +519,13 @@ int flask_disable(void)
  49.350      return 0;
  49.351  }
  49.352  
  49.353 -static int flask_security_disable(char *buf, int count)
  49.354 +static int flask_security_disable(char *buf, uint32_t count)
  49.355  {
  49.356 -    char *page = NULL;
  49.357      int length;
  49.358      int new_value;
  49.359  
  49.360 -    if ( count < 0 || count >= PAGE_SIZE )
  49.361 -        return -ENOMEM;
  49.362 -    page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.363 -    if ( !page )
  49.364 -        return -ENOMEM;
  49.365 -    memset(page, 0, PAGE_SIZE);
  49.366 -    length = -EFAULT;
  49.367 -    if ( copy_from_user(page, buf, count) )
  49.368 -        goto out;
  49.369 -
  49.370      length = -EINVAL;
  49.371 -    if ( sscanf(page, "%d", &new_value) != 1 )
  49.372 +    if ( sscanf(buf, "%d", &new_value) != 1 )
  49.373          goto out;
  49.374  
  49.375      if ( new_value )
  49.376 @@ -564,57 +538,35 @@ static int flask_security_disable(char *
  49.377      length = count;
  49.378  
  49.379  out:
  49.380 -    xfree(page);
  49.381      return length;
  49.382  }
  49.383  
  49.384 -static int flask_security_setavc_threshold(char *buf, int count)
  49.385 +static int flask_security_setavc_threshold(char *buf, uint32_t count)
  49.386  {
  49.387 -    char *page = NULL;
  49.388      int ret;
  49.389      int new_value;
  49.390  
  49.391 -    if ( count < 0 || count >= PAGE_SIZE )
  49.392 -    {
  49.393 -        ret = -ENOMEM;
  49.394 -        goto out;
  49.395 -    }
  49.396 -
  49.397 -    page = (char*)xmalloc_bytes(PAGE_SIZE);
  49.398 -    if (!page)
  49.399 -        return -ENOMEM;
  49.400 -    memset(page, 0, PAGE_SIZE);
  49.401 -
  49.402 -    if ( copy_from_user(page, buf, count) )
  49.403 -    {
  49.404 -        ret = -EFAULT;
  49.405 -        goto out_free;
  49.406 -    }
  49.407 -
  49.408 -    if ( sscanf(page, "%u", &new_value) != 1 )
  49.409 +    if ( sscanf(buf, "%u", &new_value) != 1 )
  49.410      {
  49.411          ret = -EINVAL;
  49.412 -        goto out_free;
  49.413 +        goto out;
  49.414      }
  49.415  
  49.416      if ( new_value != avc_cache_threshold )
  49.417      {
  49.418          ret = domain_has_security(current->domain, SECURITY__SETSECPARAM);
  49.419          if ( ret )
  49.420 -            goto out_free;
  49.421 +            goto out;
  49.422          avc_cache_threshold = new_value;
  49.423      }
  49.424      ret = count;
  49.425  
  49.426 -out_free:
  49.427 -    xfree(page);
  49.428  out:
  49.429      return ret;
  49.430  }
  49.431  
  49.432 -static int flask_security_set_bool(char *buf, int count)
  49.433 +static int flask_security_set_bool(char *buf, uint32_t count)
  49.434  {
  49.435 -    char *page = NULL;
  49.436      int length = -EFAULT;
  49.437      int i, new_value;
  49.438  
  49.439 @@ -624,25 +576,8 @@ static int flask_security_set_bool(char 
  49.440      if ( length )
  49.441          goto out;
  49.442  
  49.443 -    if ( count < 0 || count >= PAGE_SIZE )
  49.444 -    {
  49.445 -        length = -ENOMEM;
  49.446 -        goto out;
  49.447 -    }
  49.448 -
  49.449 -    page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.450 -    if ( !page )
  49.451 -    {
  49.452 -        length = -ENOMEM;
  49.453 -        goto out;
  49.454 -    }
  49.455 -    memset(page, 0, PAGE_SIZE);
  49.456 -
  49.457 -    if ( copy_from_user(page, buf, count) )
  49.458 -        goto out;
  49.459 -
  49.460      length = -EINVAL;
  49.461 -    if ( sscanf(page, "%d %d", &i, &new_value) != 2 )
  49.462 +    if ( sscanf(buf, "%d %d", &i, &new_value) != 2 )
  49.463          goto out;
  49.464  
  49.465      if ( new_value )
  49.466 @@ -655,14 +590,11 @@ static int flask_security_set_bool(char 
  49.467  
  49.468  out:
  49.469      spin_unlock(&sel_sem);
  49.470 -    if ( page )
  49.471 -        xfree(page);
  49.472      return length;
  49.473  }
  49.474  
  49.475 -static int flask_security_commit_bools(char *buf, int count)
  49.476 +static int flask_security_commit_bools(char *buf, uint32_t count)
  49.477  {
  49.478 -    char *page = NULL;
  49.479      int length = -EFAULT;
  49.480      int new_value;
  49.481  
  49.482 @@ -672,25 +604,8 @@ static int flask_security_commit_bools(c
  49.483      if ( length )
  49.484          goto out;
  49.485  
  49.486 -    if ( count < 0 || count >= PAGE_SIZE )
  49.487 -    {
  49.488 -        length = -ENOMEM;
  49.489 -        goto out;
  49.490 -    }
  49.491 -
  49.492 -    page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.493 -    if ( !page )
  49.494 -    {
  49.495 -        length = -ENOMEM;
  49.496 -        goto out;
  49.497 -    }
  49.498 -    memset(page, 0, PAGE_SIZE);
  49.499 -
  49.500 -    if ( copy_from_user(page, buf, count) )
  49.501 -        goto out;
  49.502 -
  49.503      length = -EINVAL;
  49.504 -    if ( sscanf(page, "%d", &new_value) != 1 )
  49.505 +    if ( sscanf(buf, "%d", &new_value) != 1 )
  49.506          goto out;
  49.507  
  49.508      if ( new_value )
  49.509 @@ -700,40 +615,18 @@ static int flask_security_commit_bools(c
  49.510  
  49.511  out:
  49.512      spin_unlock(&sel_sem);
  49.513 -    if ( page )
  49.514 -        xfree(page);
  49.515      return length;
  49.516  }
  49.517  
  49.518 -static int flask_security_get_bool(char *buf, int count)
  49.519 +static int flask_security_get_bool(char *buf, uint32_t count)
  49.520  {
  49.521 -    char *page = NULL;
  49.522      int length;
  49.523      int i, cur_enforcing;
  49.524      
  49.525      spin_lock(&sel_sem);
  49.526      
  49.527 -    length = -EFAULT;
  49.528 -
  49.529 -    if ( count < 0 || count > PAGE_SIZE )
  49.530 -    {
  49.531 -        length = -EINVAL;
  49.532 -        goto out;
  49.533 -    }
  49.534 -
  49.535 -    page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.536 -    if ( !page )
  49.537 -    {
  49.538 -        length = -ENOMEM;
  49.539 -        goto out;
  49.540 -    }
  49.541 -    memset(page, 0, PAGE_SIZE);
  49.542 -
  49.543 -    if ( copy_from_user(page, buf, count) )
  49.544 -        goto out;
  49.545 -
  49.546      length = -EINVAL;
  49.547 -    if ( sscanf(page, "%d", &i) != 1 )
  49.548 +    if ( sscanf(buf, "%d", &i) != 1 )
  49.549          goto out;
  49.550  
  49.551      cur_enforcing = security_get_bool_value(i);
  49.552 @@ -743,18 +636,12 @@ static int flask_security_get_bool(char 
  49.553          goto out;
  49.554      }
  49.555  
  49.556 -    length = snprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
  49.557 +    memset(buf, 0, count);
  49.558 +    length = snprintf(buf, count, "%d %d", cur_enforcing,
  49.559                  bool_pending_values[i]);
  49.560 -    if ( length < 0 )
  49.561 -        goto out;
  49.562 -
  49.563 -    if ( copy_to_user(buf, page, length) )
  49.564 -        length = -EFAULT;
  49.565  
  49.566  out:
  49.567      spin_unlock(&sel_sem);
  49.568 -    if ( page )
  49.569 -        xfree(page);
  49.570      return length;
  49.571  }
  49.572  
  49.573 @@ -786,7 +673,7 @@ out:
  49.574  
  49.575  #ifdef FLASK_AVC_STATS
  49.576  
  49.577 -static int flask_security_avc_cachestats(char *buf, int count)
  49.578 +static int flask_security_avc_cachestats(char *buf, uint32_t count)
  49.579  {
  49.580      char *page = NULL;
  49.581      int len = 0;
  49.582 @@ -802,9 +689,15 @@ static int flask_security_avc_cachestats
  49.583  
  49.584      len = snprintf(page, PAGE_SIZE, "lookups hits misses allocations reclaims "
  49.585                                                                     "frees\n");
  49.586 +    if ( len > count ) {
  49.587 +        length = -EINVAL;
  49.588 +        goto out;
  49.589 +    }
  49.590 +    
  49.591      memcpy(buf, page, len);
  49.592      buf += len;
  49.593      length += len;
  49.594 +    count -= len;
  49.595  
  49.596      for ( cpu = idx; cpu < NR_CPUS; ++cpu )
  49.597      {
  49.598 @@ -816,22 +709,27 @@ static int flask_security_avc_cachestats
  49.599          len = snprintf(page, PAGE_SIZE, "%u %u %u %u %u %u\n", st->lookups,
  49.600                                         st->hits, st->misses, st->allocations,
  49.601                                                         st->reclaims, st->frees);
  49.602 +        if ( len > count ) {
  49.603 +            length = -EINVAL;
  49.604 +            goto out;
  49.605 +        }
  49.606          memcpy(buf, page, len);
  49.607          buf += len;
  49.608          length += len;
  49.609 +        count -= len;
  49.610      }
  49.611  
  49.612 +out:
  49.613      xfree(page);    
  49.614      return length;
  49.615  }
  49.616  
  49.617  #endif
  49.618  
  49.619 -static int flask_security_load(char *buf, int count)
  49.620 +static int flask_security_load(char *buf, uint32_t count)
  49.621  {
  49.622      int ret;
  49.623      int length;
  49.624 -    void *data = NULL;
  49.625  
  49.626      spin_lock(&sel_sem);
  49.627  
  49.628 @@ -839,18 +737,7 @@ static int flask_security_load(char *buf
  49.629      if ( length )
  49.630          goto out;
  49.631  
  49.632 -    if ( (count < 0) || (count > 64 * 1024 * 1024) 
  49.633 -                               || (data = xmalloc_array(char, count)) == NULL )
  49.634 -    {
  49.635 -        length = -ENOMEM;
  49.636 -        goto out;
  49.637 -    }
  49.638 -
  49.639 -    length = -EFAULT;
  49.640 -    if ( copy_from_user(data, buf, count) != 0 )
  49.641 -        goto out;
  49.642 -
  49.643 -    length = security_load_policy(data, count);
  49.644 +    length = security_load_policy(buf, count);
  49.645      if ( length )
  49.646          goto out;
  49.647  
  49.648 @@ -862,7 +749,6 @@ static int flask_security_load(char *buf
  49.649  
  49.650  out:
  49.651      spin_unlock(&sel_sem);
  49.652 -    xfree(data);
  49.653      return length;
  49.654  }
  49.655  
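flask_security_load() no longer bounds-checks or copies the policy blob itself: do_flask_op() has already validated op->size against MAX_POLICY_SIZE and copied the data into hypervisor memory, so the helper simply hands the buffer to security_load_policy(). From the toolstack side a policy load reduces to filling in a flask_op_t; a rough caller-side sketch, where do_xsm_hypercall() is a placeholder rather than a real libxc entry point:

    /* Illustrative caller-side view of FLASK_LOAD after this change;
     * 'do_xsm_hypercall' is a placeholder, not a real libxc function. */
    static int load_policy_example(char *policy_bytes, uint32_t policy_len)
    {
        flask_op_t op;

        op.cmd  = FLASK_LOAD;
        op.buf  = policy_bytes;   /* do_flask_op() now copies this in itself */
        op.size = policy_len;     /* rejected if it exceeds MAX_POLICY_SIZE  */
        return do_xsm_hypercall(&op);
    }
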
  49.656 @@ -871,188 +757,156 @@ long do_flask_op(XEN_GUEST_HANDLE(xsm_op
  49.657      flask_op_t curop, *op = &curop;
  49.658      int rc = 0;
  49.659      int length = 0;
  49.660 -    char *page = NULL;
  49.661 +    char *arg = NULL;
  49.662  
  49.663      if ( copy_from_guest(op, u_flask_op, 1) )
  49.664          return -EFAULT;
  49.665  
  49.666 +    if ( op->cmd > FLASK_LAST)
  49.667 +        return -EINVAL;
  49.668 +
  49.669 +    if ( op->size > MAX_POLICY_SIZE )
  49.670 +        return -EINVAL;
  49.671 +
  49.672 +    if ( (op->buf == NULL && op->size != 0) || 
  49.673 +                                    (op->buf != NULL && op->size == 0) )
  49.674 +        return -EINVAL;
  49.675 +
  49.676 +    arg = xmalloc_bytes(op->size + 1);
  49.677 +    if ( !arg )
  49.678 +        return -ENOMEM;
  49.679 +
  49.680 +    memset(arg, 0, op->size + 1);
  49.681 +
  49.682 +    if ( (FLASK_COPY_IN&(1UL<<op->cmd)) && op->buf != NULL && 
  49.683 +           copy_from_guest(arg, guest_handle_from_ptr(op->buf, char), op->size) )
  49.684 +    {
  49.685 +        rc = -EFAULT;
  49.686 +        goto out;
  49.687 +    }
  49.688 +
  49.689      switch ( op->cmd )
  49.690      {
  49.691  
  49.692      case FLASK_LOAD:
  49.693      {
  49.694 -        length = flask_security_load(op->buf, op->size);
  49.695 +        length = flask_security_load(arg, op->size);
  49.696      }
  49.697      break;
  49.698      
  49.699      case FLASK_GETENFORCE:
  49.700      {
  49.701 -        page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.702 -        if ( !page )
  49.703 -            return -ENOMEM;
  49.704 -        memset(page, 0, PAGE_SIZE);
  49.705 -        
  49.706 -        length = snprintf(page, PAGE_SIZE, "%d", flask_enforcing);
  49.707 -        
  49.708 -        if ( copy_to_user(op->buf, page, length) )
  49.709 -        {
  49.710 -            rc = -EFAULT;
  49.711 -            goto out;
  49.712 -        }
  49.713 +        length = snprintf(arg, op->size, "%d", flask_enforcing);
  49.714      }
  49.715      break;    
  49.716  
  49.717      case FLASK_SETENFORCE:
  49.718      {
  49.719 -        length = flask_security_setenforce(op->buf, op->size);
  49.720 +        length = flask_security_setenforce(arg, op->size);
  49.721      }
  49.722      break;    
  49.723  
  49.724      case FLASK_CONTEXT_TO_SID:
  49.725      {
  49.726 -        length = flask_security_context(op->buf, op->size);
  49.727 +        length = flask_security_context(arg, op->size);
  49.728      }
  49.729      break;    
  49.730  
  49.731      case FLASK_SID_TO_CONTEXT:
  49.732      {
  49.733 -        length = flask_security_sid(op->buf, op->size);
  49.734 +        length = flask_security_sid(arg, op->size);
  49.735      }
  49.736      break; 
  49.737  
  49.738      case FLASK_ACCESS:
  49.739      {
  49.740 -        length = flask_security_access(op->buf, op->size);
  49.741 +        length = flask_security_access(arg, op->size);
  49.742      }
  49.743      break;    
  49.744  
  49.745      case FLASK_CREATE:
  49.746      {
  49.747 -        length = flask_security_create(op->buf, op->size);
  49.748 +        length = flask_security_create(arg, op->size);
  49.749      }
  49.750      break;    
  49.751  
  49.752      case FLASK_RELABEL:
  49.753      {
  49.754 -        length = flask_security_relabel(op->buf, op->size);
  49.755 +        length = flask_security_relabel(arg, op->size);
  49.756      }
  49.757      break;
  49.758  
  49.759      case FLASK_USER:
  49.760      {
  49.761 -        length = flask_security_user(op->buf, op->size);
  49.762 +        length = flask_security_user(arg, op->size);
  49.763      }
  49.764      break;    
  49.765  
  49.766      case FLASK_POLICYVERS:
  49.767      {
  49.768 -        page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.769 -        if ( !page )
  49.770 -            return -ENOMEM;
  49.771 -        memset(page, 0, PAGE_SIZE);
  49.772 -
  49.773 -        length = snprintf(page, PAGE_SIZE, "%d", POLICYDB_VERSION_MAX);
  49.774 -
  49.775 -        if ( copy_to_user(op->buf, page, length) )
  49.776 -        {
  49.777 -            rc = -EFAULT;
  49.778 -            goto out;
  49.779 -        }
  49.780 +        length = snprintf(arg, op->size, "%d", POLICYDB_VERSION_MAX);
  49.781      }
  49.782      break;    
  49.783  
  49.784      case FLASK_GETBOOL:
  49.785      {
  49.786 -        length = flask_security_get_bool(op->buf, op->size);
  49.787 +        length = flask_security_get_bool(arg, op->size);
  49.788      }
  49.789      break;
  49.790  
  49.791      case FLASK_SETBOOL:
  49.792      {
  49.793 -        length = flask_security_set_bool(op->buf, op->size);
  49.794 +        length = flask_security_set_bool(arg, op->size);
  49.795      }
  49.796      break;
  49.797  
  49.798      case FLASK_COMMITBOOLS:
  49.799      {
  49.800 -        length = flask_security_commit_bools(op->buf, op->size);
  49.801 +        length = flask_security_commit_bools(arg, op->size);
  49.802      }
  49.803      break;
  49.804  
  49.805      case FLASK_MLS:
  49.806      {
  49.807 -        page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.808 -        if ( !page )
  49.809 -            return -ENOMEM;
  49.810 -        memset(page, 0, PAGE_SIZE);
  49.811 -
  49.812 -        length = snprintf(page, PAGE_SIZE, "%d", flask_mls_enabled);
  49.813 -
  49.814 -        if ( copy_to_user(op->buf, page, length) )
  49.815 -        {
  49.816 -            rc = -EFAULT;
  49.817 -            goto out;
  49.818 -        }
  49.819 +        length = snprintf(arg, op->size, "%d", flask_mls_enabled);
  49.820      }
  49.821      break;    
  49.822  
  49.823      case FLASK_DISABLE:
  49.824      {
  49.825 -        length = flask_security_disable(op->buf, op->size);
  49.826 +        length = flask_security_disable(arg, op->size);
  49.827      }
  49.828      break;    
  49.829  
  49.830      case FLASK_GETAVC_THRESHOLD:
  49.831      {
  49.832 -        page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.833 -        if ( !page )
  49.834 -            return -ENOMEM;
  49.835 -        memset(page, 0, PAGE_SIZE);
  49.836 -
  49.837 -        length = snprintf(page, PAGE_SIZE, "%d", avc_cache_threshold);
  49.838 -
  49.839 -        if ( copy_to_user(op->buf, page, length) )
  49.840 -        {
  49.841 -            rc = -EFAULT;
  49.842 -            goto out;
  49.843 -        }
  49.844 +        length = snprintf(arg, op->size, "%d", avc_cache_threshold);
  49.845      }
  49.846      break;
  49.847  
  49.848      case FLASK_SETAVC_THRESHOLD:
  49.849      {
  49.850 -        length = flask_security_setavc_threshold(op->buf, op->size);
  49.851 +        length = flask_security_setavc_threshold(arg, op->size);
  49.852      }
  49.853      break;
  49.854  
  49.855      case FLASK_AVC_HASHSTATS:
  49.856      {
  49.857 -        page = (char *)xmalloc_bytes(PAGE_SIZE);
  49.858 -        if ( !page )
  49.859 -            return -ENOMEM;
  49.860 -        memset(page, 0, PAGE_SIZE);
  49.861 -
  49.862 -        length = avc_get_hash_stats(page);
  49.863 -
  49.864 -        if ( copy_to_user(op->buf, page, length) )
  49.865 -        {
  49.866 -            rc = -EFAULT;
  49.867 -            goto out;
  49.868 -        }
  49.869 +        length = avc_get_hash_stats(arg, op->size);
  49.870      }
  49.871      break;
  49.872  
  49.873  #ifdef FLASK_AVC_STATS    
  49.874      case FLASK_AVC_CACHESTATS:
  49.875      {
  49.876 -        length = flask_security_avc_cachestats(op->buf, op->size);
  49.877 +        length = flask_security_avc_cachestats(arg, op->size);
  49.878      }
  49.879      break;
  49.880 -#endif    
  49.881 +#endif
  49.882  
  49.883      case FLASK_MEMBER:
  49.884      {
  49.885 -        length = flask_security_member(op->buf, op->size);
  49.886 +        length = flask_security_member(arg, op->size);
  49.887      }
  49.888      break;    
  49.889  
  49.890 @@ -1067,13 +921,19 @@ long do_flask_op(XEN_GUEST_HANDLE(xsm_op
  49.891          rc = length;
  49.892          goto out;
  49.893      }
  49.894 +    
  49.895 +    if ( (FLASK_COPY_OUT&(1UL<<op->cmd)) && op->buf != NULL && 
  49.896 +             copy_to_guest(guest_handle_from_ptr(op->buf, char), arg, op->size) )
  49.897 +    {
  49.898 +        rc = -EFAULT;
  49.899 +        goto out;
  49.900 +    }
  49.901 +
  49.902      op->size = length;
  49.903      if ( copy_to_guest(u_flask_op, op, 1) )
  49.904          rc = -EFAULT;
  49.905  
  49.906  out:
  49.907 -    if ( page )
  49.908 -        xfree(page);
  49.909 +    xfree(arg);
  49.910      return rc;
  49.911  }
  49.912 -
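The rewritten do_flask_op() keys its single copy-in/copy-out pass off two per-command bitmasks, FLASK_COPY_IN and FLASK_COPY_OUT, which are defined elsewhere in this changeset. A minimal sketch of how such masks can be composed, one bit per FLASK_* command number; the particular commands chosen below are an assumption for illustration, not the actual mask values from flask_op.c:

    /* Hypothetical illustration only: each mask carries one bit per command,
     * tested in do_flask_op() as (MASK & (1UL << op->cmd)). */
    #define CMD_BIT(cmd)      (1UL << (cmd))
    #define EXAMPLE_COPY_IN   ( CMD_BIT(FLASK_LOAD)       | \
                                CMD_BIT(FLASK_SETENFORCE) | \
                                CMD_BIT(FLASK_SETBOOL) )
    #define EXAMPLE_COPY_OUT  ( CMD_BIT(FLASK_GETENFORCE) | \
                                CMD_BIT(FLASK_POLICYVERS) | \
                                CMD_BIT(FLASK_GETBOOL) )
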
    50.1 --- a/xen/xsm/flask/hooks.c	Wed Jul 23 11:21:47 2008 +0900
    50.2 +++ b/xen/xsm/flask/hooks.c	Wed Jul 23 12:10:20 2008 +0900
    50.3 @@ -714,18 +714,6 @@ static int flask_perfcontrol(void)
    50.4      return domain_has_xen(current->domain, XEN__PERFCONTROL);
    50.5  }
    50.6  
    50.7 -void flask_complete_init(struct domain *d)
    50.8 -{
    50.9 -    struct domain_security_struct *dsec;
   50.10 -
   50.11 -    /* Set the security state for the Dom0 domain. */
   50.12 -    dsec = d->ssid;
   50.13 -    dsec->sid = SECINITSID_DOM0;
   50.14 -    dsec->create_sid = SECINITSID_UNLABELED;
   50.15 -
   50.16 -    printk("Flask:  Completed initialization.\n");
   50.17 -}
   50.18 -
   50.19  #ifdef CONFIG_X86
   50.20  static int flask_shadow_control(struct domain *d, uint32_t op)
   50.21  {
   50.22 @@ -1101,7 +1089,6 @@ static struct xsm_operations flask_ops =
   50.23      .schedop_shutdown = flask_schedop_shutdown,
   50.24  
   50.25      .__do_xsm_op = do_flask_op,
   50.26 -    .complete_init = flask_complete_init,    
   50.27  
   50.28  #ifdef CONFIG_X86
   50.29      .shadow_control = flask_shadow_control,
    51.1 --- a/xen/xsm/flask/include/avc.h	Wed Jul 23 11:21:47 2008 +0900
    51.2 +++ b/xen/xsm/flask/include/avc.h	Wed Jul 23 12:10:20 2008 +0900
    51.3 @@ -95,7 +95,7 @@ int avc_add_callback(int (*callback)(u32
    51.4                                      u32 ssid, u32 tsid, u16 tclass, u32 perms);
    51.5  
    51.6  /* Exported to selinuxfs */
    51.7 -int avc_get_hash_stats(char *page);
    51.8 +int avc_get_hash_stats(char *buf, uint32_t size);
    51.9  extern unsigned int avc_cache_threshold;
   51.10  
   51.11  #ifdef FLASK_AVC_STATS
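The avc_get_hash_stats() prototype change follows the same pattern as the flask_op.c refactor: the hash-statistics text is now formatted into a caller-sized buffer rather than a whole page. A minimal sketch of the bounded form; the counter names are placeholders, the real implementation lives in xen/xsm/flask/avc.c:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: format AVC hash statistics into 'buf', writing at
     * most 'size' bytes and returning the formatted length. */
    static int example_get_hash_stats(char *buf, uint32_t size, int entries,
                                      int buckets_used, int slots, int max_chain)
    {
        return snprintf(buf, size,
                        "entries: %d\nbuckets used: %d/%d\nlongest chain: %d\n",
                        entries, buckets_used, slots, max_chain);
    }
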