ia64/xen-unstable

changeset 3817:2e88f4d9217f

bitkeeper revision 1.1159.261.3 (420e3c341h1fbkH3NCtXo63yPlvjGg)

Hand merge
author mafetter@fleming.research
date Sat Feb 12 17:26:12 2005 +0000 (2005-02-12)
parents 3f8766806bdd 58be428f51a8
children 64c43cfd7068
files .rootkeys linux-2.4.29-xen-sparse/arch/xen/kernel/setup.c linux-2.6.10-xen-sparse/arch/xen/i386/kernel/setup.c linux-2.6.10-xen-sparse/drivers/xen/blkfront/block.h linux-2.6.10-xen-sparse/drivers/xen/blkfront/vbd.c linux-2.6.10-xen-sparse/drivers/xen/netback/netback.c linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c tools/vnet/00INSTALL tools/vnet/Makefile tools/vnet/vnet-module/if_varp.h tools/vnet/vnet-module/varp.c xen/arch/x86/domain.c xen/arch/x86/extable.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/x86_32/entry.S xen/include/asm-x86/shadow.h xen/include/xen/perfc_defn.h
line diff
     1.1 --- a/.rootkeys	Sat Feb 12 16:34:28 2005 +0000
     1.2 +++ b/.rootkeys	Sat Feb 12 17:26:12 2005 +0000
     1.3 @@ -744,6 +744,7 @@ 4104ffca-jPHLVOrW0n0VghEXXtKxg tools/sv/
     1.4  40fcefb3yMSrZvApO9ToIi-iQwnchA tools/sv/images/xen.png
     1.5  41013a83z27rKvWIxAfUBMVZ1eDCDg tools/sv/inc/script.js
     1.6  40fcefb3zGC9XNBkSwTEobCoq8YClA tools/sv/inc/style.css
     1.7 +420b963dK3yGNtqxRM8npGZtrCQd1g tools/vnet/00INSTALL
     1.8  41a21888_WlknVWjSxb32Fo13_ujsw tools/vnet/00README
     1.9  41a21888bOiOJc7blzRbe4MNJoaYTw tools/vnet/Makefile
    1.10  41a21888mg2k5HeiVjlQYEtJBZT4Qg tools/vnet/doc/vnet-module.txt
     2.1 --- a/linux-2.4.29-xen-sparse/arch/xen/kernel/setup.c	Sat Feb 12 16:34:28 2005 +0000
     2.2 +++ b/linux-2.4.29-xen-sparse/arch/xen/kernel/setup.c	Sat Feb 12 17:26:12 2005 +0000
     2.3 @@ -240,7 +240,9 @@ void __init setup_arch(char **cmdline_p)
     2.4      boot_cpu_data.pgd_quick = cpu0_pgd_quicklist;
     2.5      boot_cpu_data.pte_quick = cpu0_pte_quicklist;
     2.6  
     2.7 -    ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
     2.8 +    /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
     2.9 +       properly.  Setting ROOT_DEV to default to /dev/ram0 breaks initrd. */
    2.10 +    ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
    2.11      memset(&drive_info, 0, sizeof(drive_info));
    2.12      memset(&screen_info, 0, sizeof(screen_info));
    2.13      
     3.1 --- a/linux-2.6.10-xen-sparse/arch/xen/i386/kernel/setup.c	Sat Feb 12 16:34:28 2005 +0000
     3.2 +++ b/linux-2.6.10-xen-sparse/arch/xen/i386/kernel/setup.c	Sat Feb 12 17:26:12 2005 +0000
     3.3 @@ -1360,7 +1360,10 @@ void __init setup_arch(char **cmdline_p)
     3.4  		efi_enabled = 1;
     3.5  #endif
     3.6  
     3.7 - 	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); /*old_decode_dev(ORIG_ROOT_DEV);*/
     3.8 +	/* This must be initialized to UNNAMED_MAJOR for ipconfig to work
     3.9 +	   properly.  Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
    3.10 +	*/
    3.11 +	ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
    3.12   	drive_info = DRIVE_INFO;
    3.13   	screen_info = SCREEN_INFO;
    3.14  	edid_info = EDID_INFO;
     4.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/blkfront/block.h	Sat Feb 12 16:34:28 2005 +0000
     4.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/blkfront/block.h	Sat Feb 12 17:26:12 2005 +0000
     4.3 @@ -5,7 +5,7 @@
     4.4   * 
     4.5   * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
     4.6   * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
     4.7 - * Copyright (c) 2004, Christian Limpach
     4.8 + * Copyright (c) 2004-2005, Christian Limpach
     4.9   * 
    4.10   * This file may be distributed separately from the Linux kernel, or
    4.11   * incorporated into other software packages, subject to the following license:
    4.12 @@ -69,7 +69,8 @@ struct xlbd_type_info {
    4.13      int devs_per_major;
    4.14      int hardsect_size;
    4.15      int max_sectors;
    4.16 -    char *name;
    4.17 +    char *devname;
    4.18 +    char *diskname;
    4.19  };
    4.20  
    4.21  /*
     5.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/blkfront/vbd.c	Sat Feb 12 16:34:28 2005 +0000
     5.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/blkfront/vbd.c	Sat Feb 12 17:26:12 2005 +0000
     5.3 @@ -5,7 +5,7 @@
     5.4   * 
     5.5   * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
     5.6   * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
     5.7 - * Copyright (c) 2004, Christian Limpach
     5.8 + * Copyright (c) 2004-2005, Christian Limpach
     5.9   * 
    5.10   * This file may be distributed separately from the Linux kernel, or
    5.11   * incorporated into other software packages, subject to the following license:
    5.12 @@ -49,7 +49,8 @@ static struct xlbd_type_info xlbd_ide_ty
    5.13      .hardsect_size = 512,
    5.14      .max_sectors = 128,  /* 'hwif->rqsize' if we knew it */
    5.15      // XXXcl todo read_ahead[major]    = 8; /* from drivers/ide/ide-probe.c */
    5.16 -    .name = "hd",
    5.17 +    .devname = "ide",
    5.18 +    .diskname = "hd",
    5.19  };
    5.20  
    5.21  static struct xlbd_type_info xlbd_scsi_type = {
    5.22 @@ -59,7 +60,8 @@ static struct xlbd_type_info xlbd_scsi_t
    5.23      .hardsect_size = 512,
    5.24      .max_sectors = 128*8, /* XXX 128; */
    5.25      // XXXcl todo read_ahead[major]    = 0; /* XXX 8; -- guessing */
    5.26 -    .name = "sd",
    5.27 +    .devname = "sd",
    5.28 +    .diskname = "sd",
    5.29  };
    5.30  
    5.31  static struct xlbd_type_info xlbd_vbd_type = {
    5.32 @@ -69,7 +71,8 @@ static struct xlbd_type_info xlbd_vbd_ty
    5.33      .hardsect_size = 512,
    5.34      .max_sectors = 128,
    5.35      // XXXcl todo read_ahead[major]    = 8;
    5.36 -    .name = "xvd",
    5.37 +    .devname = "xvd",
    5.38 +    .diskname = "xvd",
    5.39  };
    5.40  
    5.41  static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
    5.42 @@ -183,13 +186,13 @@ static struct xlbd_major_info *xlbd_get_
    5.43      }
    5.44      major_info[mi_idx]->major = new_major;
    5.45  
    5.46 -    if (register_blkdev(major_info[mi_idx]->major, major_info[mi_idx]->type->name)) {
    5.47 +    if (register_blkdev(major_info[mi_idx]->major, major_info[mi_idx]->type->devname)) {
    5.48          printk(KERN_ALERT "XL VBD: can't get major %d with name %s\n",
    5.49 -               major_info[mi_idx]->major, major_info[mi_idx]->type->name);
    5.50 +               major_info[mi_idx]->major, major_info[mi_idx]->type->devname);
    5.51          goto out;
    5.52      }
    5.53  
    5.54 -    devfs_mk_dir(major_info[mi_idx]->type->name);
    5.55 +    devfs_mk_dir(major_info[mi_idx]->type->devname);
    5.56  
    5.57      return major_info[mi_idx];
    5.58  
    5.59 @@ -226,7 +229,7 @@ static struct gendisk *xlvbd_get_gendisk
    5.60      gd->first_minor = xd_minor;
    5.61      gd->fops = &xlvbd_block_fops;
    5.62      gd->private_data = di;
    5.63 -    sprintf(gd->disk_name, "%s%c%d", mi->type->name,
    5.64 +    sprintf(gd->disk_name, "%s%c%d", mi->type->diskname,
    5.65              'a' + mi->index * mi->type->partn_per_major +
    5.66              (xd_minor >> mi->type->partn_shift),
    5.67              xd_minor & ((1 << mi->type->partn_shift) - 1));
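
The block.h/vbd.c hunks above split the single name field of xlbd_type_info into devname (passed to register_blkdev() and devfs_mk_dir()) and diskname (used to build the gendisk name), so the IDE type can register its major as "ide" while the disk nodes keep the historical "hd" prefix. A minimal, self-contained C sketch of the resulting naming, using a simplified stand-in struct and an assumed partn_shift value rather than the real driver types:

    #include <stdio.h>

    /* simplified stand-in for xlbd_type_info; only the fields used here */
    struct type_info {
        int partn_shift;          /* minor bits reserved for partitions (assumed 6) */
        const char *devname;      /* name registered with the block layer / devfs */
        const char *diskname;     /* prefix used when naming disks and partitions */
    };

    int main(void)
    {
        struct type_info ide = { .partn_shift = 6, .devname = "ide", .diskname = "hd" };
        int xd_minor = 65;        /* illustrative: second disk, first partition */

        /* mirrors the sprintf() in xlvbd_get_gendisk() above, with mi->index == 0 */
        printf("major registered as \"%s\", disk named \"%s%c%d\"\n",
               ide.devname,
               ide.diskname,
               'a' + (xd_minor >> ide.partn_shift),
               xd_minor & ((1 << ide.partn_shift) - 1));
        return 0;                 /* prints: major registered as "ide", disk named "hdb1" */
    }
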
     6.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/netback/netback.c	Sat Feb 12 16:34:28 2005 +0000
     6.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/netback/netback.c	Sat Feb 12 17:26:12 2005 +0000
     6.3 @@ -1,11 +1,11 @@
     6.4  /******************************************************************************
     6.5 - * arch/xen/drivers/netif/backend/main.c
     6.6 + * drivers/xen/netback/netback.c
     6.7   * 
     6.8 - * Back-end of the driver for virtual block devices. This portion of the
     6.9 - * driver exports a 'unified' block-device interface that can be accessed
    6.10 + * Back-end of the driver for virtual network devices. This portion of the
    6.11 + * driver exports a 'unified' network-device interface that can be accessed
    6.12   * by any operating system that implements a compatible front end. A 
    6.13   * reference front-end implementation can be found in:
    6.14 - *  arch/xen/drivers/netif/frontend
    6.15 + *  drivers/xen/netfront/netfront.c
    6.16   * 
    6.17   * Copyright (c) 2002-2004, K A Fraser
    6.18   */
     7.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c	Sat Feb 12 16:34:28 2005 +0000
     7.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c	Sat Feb 12 17:26:12 2005 +0000
     7.3 @@ -41,6 +41,8 @@
     7.4  #include <linux/bitops.h>
     7.5  #include <net/sock.h>
     7.6  #include <net/pkt_sched.h>
     7.7 +#include <net/arp.h>
     7.8 +#include <net/route.h>
     7.9  #include <asm/io.h>
    7.10  #include <asm-xen/evtchn.h>
    7.11  #include <asm-xen/ctrl_if.h>
    7.12 @@ -48,11 +50,6 @@
    7.13  #include <asm-xen/balloon.h>
    7.14  #include <asm/page.h>
    7.15  
    7.16 -#include <net/arp.h>
    7.17 -#include <net/route.h>
    7.18 -
    7.19 -#define DEBUG 0
    7.20 -
    7.21  #ifndef __GFP_NOWARN
    7.22  #define __GFP_NOWARN 0
    7.23  #endif
    7.24 @@ -63,7 +60,7 @@
    7.25          atomic_set(&(skb_shinfo(_skb)->dataref), 1);  \
    7.26          skb_shinfo(_skb)->nr_frags = 0;               \
    7.27          skb_shinfo(_skb)->frag_list = NULL;           \
    7.28 -    } while ( 0 )
    7.29 +    } while (0)
    7.30  
    7.31  /* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
    7.32  #define RX_HEADROOM 200
    7.33 @@ -171,10 +168,9 @@ static struct net_device *find_dev_by_ha
    7.34  {
    7.35      struct list_head *ent;
    7.36      struct net_private *np;
    7.37 -    list_for_each ( ent, &dev_list )
    7.38 -    {
    7.39 +    list_for_each (ent, &dev_list) {
    7.40          np = list_entry(ent, struct net_private, list);
    7.41 -        if ( np->handle == handle )
    7.42 +        if (np->handle == handle)
    7.43              return np->dev;
    7.44      }
    7.45      return NULL;
    7.46 @@ -203,7 +199,7 @@ static void netctrl_init(void)
    7.47   */
    7.48  static int netctrl_err(int err)
    7.49  {
    7.50 -    if ( (err < 0) && !netctrl.err )
    7.51 +    if ((err < 0) && !netctrl.err)
    7.52          netctrl.err = err;
    7.53      return netctrl.err;
    7.54  }
    7.55 @@ -216,9 +212,9 @@ static int netctrl_connected(void)
    7.56  {
    7.57      int ok;
    7.58  
    7.59 -    if ( netctrl.err )
    7.60 +    if (netctrl.err)
    7.61          ok = netctrl.err;
    7.62 -    else if ( netctrl.up == NETIF_DRIVER_STATUS_UP )
    7.63 +    else if (netctrl.up == NETIF_DRIVER_STATUS_UP)
    7.64          ok = (netctrl.connected_n == netctrl.interface_n);
    7.65      else
    7.66          ok = 0;
    7.67 @@ -266,14 +262,14 @@ static int send_fake_arp(struct net_devi
    7.68      src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
    7.69  
    7.70      /* No IP? Then nothing to do. */
    7.71 -    if ( src_ip == 0 )
    7.72 +    if (src_ip == 0)
    7.73          return 0;
    7.74  
    7.75      skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
    7.76                       dst_ip, dev, src_ip,
    7.77                       /*dst_hw*/ NULL, /*src_hw*/ NULL, 
    7.78                       /*target_hw*/ dev->dev_addr);
    7.79 -    if ( skb == NULL )
    7.80 +    if (skb == NULL)
    7.81          return -ENOMEM;
    7.82  
    7.83      return dev_queue_xmit(skb);
    7.84 @@ -302,15 +298,14 @@ static void network_tx_buf_gc(struct net
    7.85      struct net_private *np = netdev_priv(dev);
    7.86      struct sk_buff *skb;
    7.87  
    7.88 -    if ( np->backend_state != BEST_CONNECTED )
    7.89 +    if (np->backend_state != BEST_CONNECTED)
    7.90          return;
    7.91  
    7.92      do {
    7.93          prod = np->tx->resp_prod;
    7.94          rmb(); /* Ensure we see responses up to 'rp'. */
    7.95  
    7.96 -        for ( i = np->tx_resp_cons; i != prod; i++ )
    7.97 -        {
    7.98 +        for (i = np->tx_resp_cons; i != prod; i++) {
    7.99              id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
   7.100              skb = np->tx_skbs[id];
   7.101              ADD_ID_TO_FREELIST(np->tx_skbs, id);
   7.102 @@ -330,14 +325,11 @@ static void network_tx_buf_gc(struct net
   7.103          np->tx->event = 
   7.104              prod + ((np->tx->req_prod - prod) >> 1) + 1;
   7.105          mb();
   7.106 -    }
   7.107 -    while ( prod != np->tx->resp_prod );
   7.108 +    } while (prod != np->tx->resp_prod);
   7.109  
   7.110 -    if ( np->tx_full && 
   7.111 -         ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE) )
   7.112 -    {
   7.113 +    if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
   7.114          np->tx_full = 0;
   7.115 -        if ( np->user_state == UST_OPEN )
   7.116 +        if (np->user_state == UST_OPEN)
   7.117              netif_wake_queue(dev);
   7.118      }
   7.119  }
   7.120 @@ -351,7 +343,7 @@ static void network_alloc_rx_buffers(str
   7.121      int i, batch_target;
   7.122      NETIF_RING_IDX req_prod = np->rx->req_prod;
   7.123  
   7.124 -    if ( unlikely(np->backend_state != BEST_CONNECTED) )
   7.125 +    if (unlikely(np->backend_state != BEST_CONNECTED))
   7.126          return;
   7.127  
   7.128      /*
   7.129 @@ -361,20 +353,18 @@ static void network_alloc_rx_buffers(str
   7.130       * ourself and for other kernel subsystems.
   7.131       */
   7.132      batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
   7.133 -    for ( i = skb_queue_len(&np->rx_batch); i < batch_target; i++ )
   7.134 -    {
   7.135 -        if ( unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL) )
   7.136 +    for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
   7.137 +        if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
   7.138              break;
   7.139          __skb_queue_tail(&np->rx_batch, skb);
   7.140      }
   7.141  
   7.142      /* Is the batch large enough to be worthwhile? */
   7.143 -    if ( i < (np->rx_target/2)  )
   7.144 +    if (i < (np->rx_target/2))
   7.145          return;
   7.146  
   7.147 -    for ( i = 0; ; i++ )
   7.148 -    {
   7.149 -        if ( (skb = __skb_dequeue(&np->rx_batch)) == NULL )
   7.150 +    for (i = 0; ; i++) {
   7.151 +        if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
   7.152              break;
   7.153  
   7.154          skb->dev = dev;
   7.155 @@ -421,15 +411,15 @@ static void network_alloc_rx_buffers(str
   7.156      (void)HYPERVISOR_multicall(rx_mcl, i+1);
   7.157  
   7.158      /* Check return status of HYPERVISOR_dom_mem_op(). */
   7.159 -    if ( unlikely(rx_mcl[i].args[5] != i) )
   7.160 +    if (unlikely(rx_mcl[i].args[5] != i))
   7.161          panic("Unable to reduce memory reservation\n");
   7.162  
   7.163      /* Above is a suitable barrier to ensure backend will see requests. */
   7.164      np->rx->req_prod = req_prod + i;
   7.165  
   7.166      /* Adjust our floating fill target if we risked running out of buffers. */
   7.167 -    if ( ((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
   7.168 -         ((np->rx_target *= 2) > RX_MAX_TARGET) )
   7.169 +    if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
   7.170 +         ((np->rx_target *= 2) > RX_MAX_TARGET))
   7.171          np->rx_target = RX_MAX_TARGET;
   7.172  }
   7.173  
   7.174 @@ -441,18 +431,16 @@ static int network_start_xmit(struct sk_
   7.175      netif_tx_request_t *tx;
   7.176      NETIF_RING_IDX i;
   7.177  
   7.178 -    if ( unlikely(np->tx_full) )
   7.179 -    {
   7.180 +    if (unlikely(np->tx_full)) {
   7.181          printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
   7.182          netif_stop_queue(dev);
   7.183          goto drop;
   7.184      }
   7.185  
   7.186 -    if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   7.187 -                  PAGE_SIZE) )
   7.188 -    {
   7.189 +    if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   7.190 +                  PAGE_SIZE)) {
   7.191          struct sk_buff *nskb;
   7.192 -        if ( unlikely((nskb = alloc_xen_skb(skb->len)) == NULL) )
   7.193 +        if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
   7.194              goto drop;
   7.195          skb_put(nskb, skb->len);
   7.196          memcpy(nskb->data, skb->data, skb->len);
   7.197 @@ -463,8 +451,7 @@ static int network_start_xmit(struct sk_
   7.198      
   7.199      spin_lock_irq(&np->tx_lock);
   7.200  
   7.201 -    if ( np->backend_state != BEST_CONNECTED )
   7.202 -    {
   7.203 +    if (np->backend_state != BEST_CONNECTED) {
   7.204          spin_unlock_irq(&np->tx_lock);
   7.205          goto drop;
   7.206      }
   7.207 @@ -485,8 +472,7 @@ static int network_start_xmit(struct sk_
   7.208  
   7.209      network_tx_buf_gc(dev);
   7.210  
   7.211 -    if ( (i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1) )
   7.212 -    {
   7.213 +    if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
   7.214          np->tx_full = 1;
   7.215          netif_stop_queue(dev);
   7.216      }
   7.217 @@ -498,7 +484,7 @@ static int network_start_xmit(struct sk_
   7.218  
   7.219      /* Only notify Xen if we really have to. */
   7.220      mb();
   7.221 -    if ( np->tx->TX_TEST_IDX == i )
   7.222 +    if (np->tx->TX_TEST_IDX == i)
   7.223          notify_via_evtchn(np->evtchn);
   7.224  
   7.225      return 0;
   7.226 @@ -509,7 +495,6 @@ static int network_start_xmit(struct sk_
   7.227      return 0;
   7.228  }
   7.229  
   7.230 -
   7.231  static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
   7.232  {
   7.233      struct net_device *dev = dev_id;
   7.234 @@ -520,8 +505,7 @@ static irqreturn_t netif_int(int irq, vo
   7.235      network_tx_buf_gc(dev);
   7.236      spin_unlock_irqrestore(&np->tx_lock, flags);
   7.237  
   7.238 -    if ( (np->rx_resp_cons != np->rx->resp_prod) &&
   7.239 -         (np->user_state == UST_OPEN) )
   7.240 +    if ((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
   7.241          netif_rx_schedule(dev);
   7.242  
   7.243      return IRQ_HANDLED;
   7.244 @@ -542,33 +526,30 @@ static int netif_poll(struct net_device 
   7.245  
   7.246      spin_lock(&np->rx_lock);
   7.247  
   7.248 -    if ( np->backend_state != BEST_CONNECTED )
   7.249 -    {
   7.250 +    if (np->backend_state != BEST_CONNECTED) {
   7.251          spin_unlock(&np->rx_lock);
   7.252          return 0;
   7.253      }
   7.254  
   7.255      skb_queue_head_init(&rxq);
   7.256  
   7.257 -    if ( (budget = *pbudget) > dev->quota )
   7.258 +    if ((budget = *pbudget) > dev->quota)
   7.259          budget = dev->quota;
   7.260  
   7.261      rp = np->rx->resp_prod;
   7.262      rmb(); /* Ensure we see queued responses up to 'rp'. */
   7.263  
   7.264 -    for ( i = np->rx_resp_cons, work_done = 0; 
   7.265 -          (i != rp) && (work_done < budget); 
   7.266 -          i++, work_done++ )
   7.267 -    {
   7.268 +    for (i = np->rx_resp_cons, work_done = 0; 
   7.269 +		    (i != rp) && (work_done < budget);
   7.270 +		    i++, work_done++) {
   7.271          rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
   7.272  
   7.273          /*
   7.274           * An error here is very odd. Usually indicates a backend bug,
   7.275           * low-memory condition, or that we didn't have reservation headroom.
   7.276           */
   7.277 -        if ( unlikely(rx->status <= 0) )
   7.278 -        {
   7.279 -            if ( net_ratelimit() )
   7.280 +        if (unlikely(rx->status <= 0)) {
   7.281 +            if (net_ratelimit())
   7.282                  printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
   7.283              np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
   7.284              wmb();
   7.285 @@ -608,8 +589,7 @@ static int netif_poll(struct net_device 
   7.286      balloon_update_driver_allowance(-work_done);
   7.287  
   7.288      /* Do all the remapping work, and M->P updates, in one big hypercall. */
   7.289 -    if ( likely((mcl - rx_mcl) != 0) )
   7.290 -    {
   7.291 +    if (likely((mcl - rx_mcl) != 0)) {
   7.292          mcl->op = __HYPERVISOR_mmu_update;
   7.293          mcl->args[0] = (unsigned long)rx_mmu;
   7.294          mcl->args[1] = mmu - rx_mmu;
   7.295 @@ -618,33 +598,29 @@ static int netif_poll(struct net_device 
   7.296          (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
   7.297      }
   7.298  
   7.299 -    while ( (skb = __skb_dequeue(&rxq)) != NULL )
   7.300 -    {
   7.301 +    while ((skb = __skb_dequeue(&rxq)) != NULL) {
   7.302          /*
   7.303           * Enough room in skbuff for the data we were passed? Also, Linux 
   7.304           * expects at least 16 bytes headroom in each receive buffer.
   7.305           */
   7.306 -        if ( unlikely(skb->tail > skb->end) ||
   7.307 -             unlikely((skb->data - skb->head) < 16) )
   7.308 -        {
   7.309 +        if (unlikely(skb->tail > skb->end) || 
   7.310 +			unlikely((skb->data - skb->head) < 16)) {
   7.311              nskb = NULL;
   7.312  
   7.313              /* Only copy the packet if it fits in the current MTU. */
   7.314 -            if ( skb->len <= (dev->mtu + ETH_HLEN) )
   7.315 -            {
   7.316 -                if ( (skb->tail > skb->end) && net_ratelimit() )
   7.317 +            if (skb->len <= (dev->mtu + ETH_HLEN)) {
   7.318 +                if ((skb->tail > skb->end) && net_ratelimit())
   7.319                      printk(KERN_INFO "Received packet needs %d bytes more "
   7.320                             "headroom.\n", skb->tail - skb->end);
   7.321  
   7.322 -                if ( (nskb = alloc_xen_skb(skb->len + 2)) != NULL )
   7.323 -                {
   7.324 +                if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
   7.325                      skb_reserve(nskb, 2);
   7.326                      skb_put(nskb, skb->len);
   7.327                      memcpy(nskb->data, skb->data, skb->len);
   7.328                      nskb->dev = skb->dev;
   7.329                  }
   7.330              }
   7.331 -            else if ( net_ratelimit() )
   7.332 +            else if (net_ratelimit())
   7.333                  printk(KERN_INFO "Received packet too big for MTU "
   7.334                         "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
   7.335  
   7.336 @@ -655,7 +631,7 @@ static int netif_poll(struct net_device 
   7.337              dev_kfree_skb(skb);
   7.338  
   7.339              /* Switch old for new, if we copied the buffer. */
   7.340 -            if ( (skb = nskb) == NULL )
   7.341 +            if ((skb = nskb) == NULL)
   7.342                  continue;
   7.343          }
   7.344          
   7.345 @@ -674,8 +650,8 @@ static int netif_poll(struct net_device 
   7.346  
   7.347      /* If we get a callback with very few responses, reduce fill target. */
   7.348      /* NB. Note exponential increase, linear decrease. */
   7.349 -    if ( ((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
   7.350 -         (--np->rx_target < RX_MIN_TARGET) )
   7.351 +    if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
   7.352 +         (--np->rx_target < RX_MIN_TARGET))
   7.353          np->rx_target = RX_MIN_TARGET;
   7.354  
   7.355      network_alloc_rx_buffers(dev);
   7.356 @@ -683,16 +659,14 @@ static int netif_poll(struct net_device 
   7.357      *pbudget   -= work_done;
   7.358      dev->quota -= work_done;
   7.359  
   7.360 -    if ( work_done < budget )
   7.361 -    {
   7.362 +    if (work_done < budget) {
   7.363          local_irq_save(flags);
   7.364  
   7.365          np->rx->event = i + 1;
   7.366      
   7.367          /* Deal with hypervisor racing our resetting of rx_event. */
   7.368          mb();
   7.369 -        if ( np->rx->resp_prod == i )
   7.370 -        {
   7.371 +        if (np->rx->resp_prod == i) {
   7.372              __netif_rx_complete(dev);
   7.373              more_to_do = 0;
   7.374          }
   7.375 @@ -755,10 +729,8 @@ static void network_connect(struct net_d
   7.376       * to avoid this but maybe it doesn't matter so much given the
   7.377       * interface has been down.
   7.378       */
   7.379 -    for ( requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++ )
   7.380 -    {
   7.381 -            if ( (unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET )
   7.382 -            {
   7.383 +    for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
   7.384 +            if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
   7.385                  struct sk_buff *skb = np->tx_skbs[i];
   7.386                  
   7.387                  tx = &np->tx->ring[requeue_idx++].req;
   7.388 @@ -775,8 +747,8 @@ static void network_connect(struct net_d
   7.389      np->tx->req_prod = requeue_idx;
   7.390  
   7.391      /* Rebuild the RX buffer freelist and the RX ring itself. */
   7.392 -    for ( requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++ )
   7.393 -        if ( (unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET )
   7.394 +    for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++)
   7.395 +        if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET)
   7.396              np->rx->ring[requeue_idx++].req.id = i;
   7.397      wmb();                
   7.398      np->rx->req_prod = requeue_idx;
   7.399 @@ -791,7 +763,7 @@ static void network_connect(struct net_d
   7.400      notify_via_evtchn(status->evtchn);  
   7.401      network_tx_buf_gc(dev);
   7.402  
   7.403 -    if ( np->user_state == UST_OPEN )
   7.404 +    if (np->user_state == UST_OPEN)
   7.405          netif_start_queue(dev);
   7.406  
   7.407      spin_unlock(&np->rx_lock);
   7.408 @@ -917,9 +889,7 @@ static void vif_disconnect(struct net_pr
   7.409   * is initiated by a special "RESET" message - disconnect could
   7.410   * just mean we're not allowed to use this interface any more.
   7.411   */
   7.412 -static void 
   7.413 -vif_reset(
   7.414 -    struct net_private *np)
   7.415 +static void vif_reset(struct net_private *np)
   7.416  {
   7.417      IPRINTK("Attempting to reconnect network interface: handle=%u\n",
   7.418              np->handle);    
   7.419 @@ -932,9 +902,8 @@ vif_reset(
   7.420   * Sets the mac and event channel from the message.
   7.421   * Binds the irq to the event channel.
   7.422   */
   7.423 -static void
   7.424 -vif_connect(
   7.425 -    struct net_private *np, netif_fe_interface_status_t *status)
   7.426 +static void 
   7.427 +vif_connect(struct net_private *np, netif_fe_interface_status_t *status)
   7.428  {
   7.429      struct net_device *dev = np->dev;
   7.430      memcpy(dev->dev_addr, status->mac, ETH_ALEN);
   7.431 @@ -959,8 +928,7 @@ static int create_netdev(int handle, str
   7.432      struct net_device *dev = NULL;
   7.433      struct net_private *np = NULL;
   7.434  
   7.435 -    if ( (dev = alloc_etherdev(sizeof(struct net_private))) == NULL )
   7.436 -    {
   7.437 +    if ((dev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
   7.438          printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
   7.439          err = -ENOMEM;
   7.440          goto exit;
   7.441 @@ -978,9 +946,9 @@ static int create_netdev(int handle, str
   7.442      np->rx_target = RX_MIN_TARGET;
   7.443  
   7.444      /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
   7.445 -    for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )
   7.446 +    for (i = 0; i <= NETIF_TX_RING_SIZE; i++)
   7.447          np->tx_skbs[i] = (void *)(i+1);
   7.448 -    for ( i = 0; i <= NETIF_RX_RING_SIZE; i++ )
   7.449 +    for (i = 0; i <= NETIF_RX_RING_SIZE; i++)
   7.450          np->rx_skbs[i] = (void *)(i+1);
   7.451  
   7.452      dev->open            = network_open;
   7.453 @@ -990,8 +958,7 @@ static int create_netdev(int handle, str
   7.454      dev->poll            = netif_poll;
   7.455      dev->weight          = 64;
   7.456      
   7.457 -    if ( (err = register_netdev(dev)) != 0 )
   7.458 -    {
   7.459 +    if ((err = register_netdev(dev)) != 0) {
   7.460          printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
   7.461          goto exit;
   7.462      }
   7.463 @@ -999,9 +966,9 @@ static int create_netdev(int handle, str
   7.464      list_add(&np->list, &dev_list);
   7.465  
   7.466    exit:
   7.467 -    if ( (err != 0) && (dev != NULL ) )
   7.468 +    if ((err != 0) && (dev != NULL ))
   7.469          kfree(dev);
   7.470 -    else if ( val != NULL )
   7.471 +    else if (val != NULL)
   7.472          *val = dev;
   7.473      return err;
   7.474  }
   7.475 @@ -1015,36 +982,34 @@ static int create_netdev(int handle, str
   7.476   * @return 0 on success, error code otherwise
   7.477   */
   7.478  static int 
   7.479 -target_vif(
   7.480 -    netif_fe_interface_status_t *status, struct net_private **np)
   7.481 +target_vif(netif_fe_interface_status_t *status, struct net_private **np)
   7.482  {
   7.483      int err = 0;
   7.484      struct net_device *dev;
   7.485  
   7.486      DPRINTK("> handle=%d\n", status->handle);
   7.487 -    if ( status->handle < 0 )
   7.488 -    {
   7.489 +    if (status->handle < 0) {
   7.490          err = -EINVAL;
   7.491          goto exit;
   7.492      }
   7.493  
   7.494 -    if ( (dev = find_dev_by_handle(status->handle)) != NULL )
   7.495 +    if ((dev = find_dev_by_handle(status->handle)) != NULL)
   7.496          goto exit;
   7.497  
   7.498 -    if ( status->status == NETIF_INTERFACE_STATUS_CLOSED )
   7.499 +    if (status->status == NETIF_INTERFACE_STATUS_CLOSED)
   7.500          goto exit;
   7.501 -    if ( status->status == NETIF_INTERFACE_STATUS_CHANGED )
   7.502 +    if (status->status == NETIF_INTERFACE_STATUS_CHANGED)
   7.503          goto exit;
   7.504  
   7.505      /* It's a new interface in a good state - create it. */
   7.506      DPRINTK("> create device...\n");
   7.507 -    if ( (err = create_netdev(status->handle, &dev)) != 0 )
   7.508 +    if ((err = create_netdev(status->handle, &dev)) != 0)
   7.509          goto exit;
   7.510  
   7.511      netctrl.interface_n++;
   7.512  
   7.513    exit:
   7.514 -    if ( np != NULL )
   7.515 +    if (np != NULL)
   7.516          *np = ((dev && !err) ? netdev_priv(dev) : NULL);
   7.517      DPRINTK("< err=%d\n", err);
   7.518      return err;
   7.519 @@ -1059,23 +1024,19 @@ static void netif_interface_status(netif
   7.520      DPRINTK("> status=%s handle=%d\n",
   7.521              status_name[status->status], status->handle);
   7.522  
   7.523 -    if ( (err = target_vif(status, &np)) != 0 )
   7.524 -    {
   7.525 +    if ((err = target_vif(status, &np)) != 0) {
   7.526          WPRINTK("Invalid netif: handle=%u\n", status->handle);
   7.527          return;
   7.528      }
   7.529  
   7.530 -    if ( np == NULL )
   7.531 -    {
   7.532 +    if (np == NULL) {
   7.533          DPRINTK("> no vif\n");
   7.534          return;
   7.535      }
   7.536  
   7.537 -    switch ( status->status )
   7.538 -    {
   7.539 +    switch (status->status) {
   7.540      case NETIF_INTERFACE_STATUS_CLOSED:
   7.541 -        switch ( np->backend_state )
   7.542 -        {
   7.543 +        switch (np->backend_state) {
   7.544          case BEST_CLOSED:
   7.545          case BEST_DISCONNECTED:
   7.546          case BEST_CONNECTED:
   7.547 @@ -1085,8 +1046,7 @@ static void netif_interface_status(netif
   7.548          break;
   7.549  
   7.550      case NETIF_INTERFACE_STATUS_DISCONNECTED:
   7.551 -        switch ( np->backend_state )
   7.552 -        {
   7.553 +        switch (np->backend_state) {
   7.554          case BEST_CLOSED:
   7.555              vif_disconnect(np);
   7.556              break;
   7.557 @@ -1098,8 +1058,7 @@ static void netif_interface_status(netif
   7.558          break;
   7.559  
   7.560      case NETIF_INTERFACE_STATUS_CONNECTED:
   7.561 -        switch ( np->backend_state )
   7.562 -        {
   7.563 +        switch (np->backend_state) {
   7.564          case BEST_CLOSED:
   7.565              WPRINTK("Unexpected netif status %s in state %s\n",
   7.566                      status_name[status->status],
   7.567 @@ -1141,20 +1100,17 @@ static void netif_driver_status(netif_fe
   7.568  static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
   7.569  {
   7.570  
   7.571 -    switch ( msg->subtype )
   7.572 -    {
   7.573 +    switch (msg->subtype) {
   7.574      case CMSG_NETIF_FE_INTERFACE_STATUS:
   7.575 -        if ( msg->length != sizeof(netif_fe_interface_status_t) )
   7.576 +        if (msg->length != sizeof(netif_fe_interface_status_t))
   7.577              goto error;
   7.578 -        netif_interface_status((netif_fe_interface_status_t *)
   7.579 -                               &msg->msg[0]);
   7.580 +        netif_interface_status((netif_fe_interface_status_t *) &msg->msg[0]);
   7.581          break;
   7.582  
   7.583      case CMSG_NETIF_FE_DRIVER_STATUS:
   7.584 -        if ( msg->length != sizeof(netif_fe_driver_status_t) )
   7.585 +        if (msg->length != sizeof(netif_fe_driver_status_t))
   7.586              goto error;
   7.587 -        netif_driver_status((netif_fe_driver_status_t *)
   7.588 -                            &msg->msg[0]);
   7.589 +        netif_driver_status((netif_fe_driver_status_t *) &msg->msg[0]);
   7.590          break;
   7.591  
   7.592      error:
   7.593 @@ -1179,8 +1135,7 @@ static int probe_interfaces(void)
   7.594  
   7.595      DPRINTK(">\n");
   7.596  
   7.597 -    for ( wait_i = 0; wait_i < wait_n; wait_i++)
   7.598 -    { 
   7.599 +    for (wait_i = 0; wait_i < wait_n; wait_i++) { 
   7.600          DPRINTK("> wait_i=%d\n", wait_i);
   7.601          conn = netctrl_connected();
   7.602          if(conn) break;
   7.603 @@ -1190,8 +1145,7 @@ static int probe_interfaces(void)
   7.604      }
   7.605  
   7.606      DPRINTK("> wait finished...\n");
   7.607 -    if ( conn <= 0 )
   7.608 -    {
   7.609 +    if (conn <= 0) {
   7.610          err = netctrl_err(-ENETDOWN);
   7.611          WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
   7.612      }
   7.613 @@ -1223,8 +1177,7 @@ static int probe_interfaces(void)
   7.614      DPRINTK(">\n");
   7.615  
   7.616      netctrl.interface_n = 0;
   7.617 -    for ( wait_i = 0; wait_i < wait_n; wait_i++ )
   7.618 -    { 
   7.619 +    for (wait_i = 0; wait_i < wait_n; wait_i++) { 
   7.620          DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
   7.621          msg.handle = query;
   7.622          memcpy(cmsg.msg, &msg, sizeof(msg));
   7.623 @@ -1236,7 +1189,7 @@ static int probe_interfaces(void)
   7.624          DPRINTK("> err=%d\n", err);
   7.625          if(err) goto exit;
   7.626          DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
   7.627 -        if((int)reply->handle < 0){
   7.628 +        if((int)reply->handle < 0) {
   7.629              // No more interfaces.
   7.630              break;
   7.631          }
   7.632 @@ -1246,8 +1199,7 @@ static int probe_interfaces(void)
   7.633      }
   7.634  
   7.635    exit:
   7.636 -    if ( err )
   7.637 -    {
   7.638 +    if (err) {
   7.639          err = netctrl_err(-ENETDOWN);
   7.640          WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
   7.641      }
   7.642 @@ -1262,22 +1214,20 @@ static int probe_interfaces(void)
   7.643   * We use this notifier to send out a fake ARP reply to reset switches and
   7.644   * router ARP caches when an IP interface is brought up on a VIF.
   7.645   */
   7.646 -static int inetdev_notify(struct notifier_block *this, 
   7.647 -                          unsigned long event, 
   7.648 -                          void *ptr)
   7.649 +static int 
   7.650 +inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
   7.651  {
   7.652      struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
   7.653      struct net_device *dev = ifa->ifa_dev->dev;
   7.654      struct list_head  *ent;
   7.655      struct net_private *np;
   7.656  
   7.657 -    if ( event != NETDEV_UP )
   7.658 +    if (event != NETDEV_UP)
   7.659          goto out;
   7.660  
   7.661 -    list_for_each ( ent, &dev_list )
   7.662 -    {
   7.663 +    list_for_each (ent, &dev_list) {
   7.664          np = list_entry(ent, struct net_private, list);
   7.665 -        if ( np->dev == dev )
   7.666 +        if (np->dev == dev)
   7.667              (void)send_fake_arp(dev);
   7.668      }
   7.669          
   7.670 @@ -1295,7 +1245,7 @@ static int __init netif_init(void)
   7.671  {
   7.672      int err = 0;
   7.673  
   7.674 -    if ( xen_start_info.flags & SIF_INITDOMAIN )
   7.675 +    if (xen_start_info.flags & SIF_INITDOMAIN)
   7.676          return 0;
   7.677  
   7.678      IPRINTK("Initialising virtual ethernet driver.\n");
   7.679 @@ -1306,7 +1256,7 @@ static int __init netif_init(void)
   7.680                                      CALLBACK_IN_BLOCKING_CONTEXT);
   7.681      send_driver_status(1);
   7.682      err = probe_interfaces();
   7.683 -    if ( err )
   7.684 +    if (err)
   7.685          ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
   7.686  
   7.687      DPRINTK("< err=%d\n", err);
   7.688 @@ -1338,8 +1288,7 @@ void netif_suspend(void)
   7.689      struct list_head *ent;
   7.690      struct net_private *np;
   7.691      
   7.692 -    list_for_each ( ent, &dev_list )
   7.693 -    {
   7.694 +    list_for_each (ent, &dev_list) {
   7.695          np = list_entry(ent, struct net_private, list);
   7.696          vif_suspend(np);
   7.697      }
   7.698 @@ -1350,8 +1299,7 @@ void netif_resume(void)
   7.699      struct list_head *ent;
   7.700      struct net_private *np;
   7.701  
   7.702 -    list_for_each ( ent, &dev_list )
   7.703 -    {
   7.704 +    list_for_each (ent, &dev_list) {
   7.705          np = list_entry(ent, struct net_private, list);
   7.706          vif_resume(np);
   7.707      }
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/tools/vnet/00INSTALL	Sat Feb 12 17:26:12 2005 +0000
     8.3 @@ -0,0 +1,31 @@
     8.4 +
     8.5 +To compile and install, run "make install".  If the build fails, or if you are
     8.6 +reinstalling, run "make clean" first; without it the build fails (at least that
     8.7 +is what I have found under 2.6.10).
     8.8 +
     8.9 +Other important items:
    8.10 +1)	You will need to have your xen0 kernel compiled with HMAC_SUPPORT 
    8.11 +	2.6.x = (MAIN MENU: Cryptographic Options -> HMAC Support)
    8.12 +	BEFORE running "make install".
    8.13 +
    8.14 +2)	You will want at least some of the other algorithms listed under
    8.15 +	"Cryptographic Options" for the kernel compiled as modules.
    8.16 +
    8.17 +3)	You will want the networking IPsec/VLAN options compiled in as modules
    8.18 +	2.6.x = (MAIN MENU: Device Drivers -> Networking Support -> 
    8.19 +				Networking Options ->
    8.20 +					IP: AH transformation
    8.21 +					IP: ESP transformation
    8.22 +					IP: IPComp transformation 
    8.23 +					IP: tunnel transformation
    8.24 +
    8.25 +					IPsec user configuration interface
    8.26 +	
    8.27 +					802.1Q VLAN Support
    8.28 +
    8.29 +4)	The module (vnet_module) will not properly load from the command line
    8.30 +	with a "modprobe vnet_module".  Use network-vnet to properly configure
    8.31 +	your system and load the module for you.
    8.32 +
    8.33 +Please refer to the additional documentation found in tools/vnet/doc for
    8.34 +proper syntax and config file parameters.
     9.1 --- a/tools/vnet/Makefile	Sat Feb 12 16:34:28 2005 +0000
     9.2 +++ b/tools/vnet/Makefile	Sat Feb 12 17:26:12 2005 +0000
     9.3 @@ -20,10 +20,10 @@ gc: gc.tar.gz
     9.4  	tar xfz gc.tar.gz
     9.5  	ln -sf gc?.? gc
     9.6  
     9.7 -gc/Makefile:
     9.8 -	(cd gc && ./configure --prefix=`pwd`/install)
     9.9 +gc/.configure-makefile:
    9.10 +	(cd gc && ./configure --prefix=`pwd`/install && touch .configure-makefile)
    9.11  
    9.12 -gc-install: gc gc/Makefile
    9.13 +gc-install: gc gc/.configure-makefile
    9.14  	make -C gc
    9.15  	make -C gc install
    9.16  
    10.1 --- a/tools/vnet/vnet-module/if_varp.h	Sat Feb 12 16:34:28 2005 +0000
    10.2 +++ b/tools/vnet/vnet-module/if_varp.h	Sat Feb 12 17:26:12 2005 +0000
    10.3 @@ -36,7 +36,7 @@ typedef struct VnetMsgHdr {
    10.4  } __attribute__((packed)) VnetMsgHdr;
    10.5  
    10.6  typedef struct VarpHdr {
    10.7 -    VnetMsgHdr;
    10.8 +    VnetMsgHdr hdr;
    10.9      uint32_t vnet;
   10.10      Vmac vmac;
   10.11      uint32_t addr;
    11.1 --- a/tools/vnet/vnet-module/varp.c	Sat Feb 12 16:34:28 2005 +0000
    11.2 +++ b/tools/vnet/vnet-module/varp.c	Sat Feb 12 17:26:12 2005 +0000
    11.3 @@ -368,8 +368,8 @@ int varp_send(u16 opcode, struct net_dev
    11.4      // Varp header.
    11.5      varph = (void*)skb_put(skbout, varp_n);
    11.6      *varph = (VarpHdr){};
    11.7 -    varph->id                = htons(VARP_ID);
    11.8 -    varph->opcode            = htons(opcode);
    11.9 +    varph->hdr.id            = htons(VARP_ID);
   11.10 +    varph->hdr.opcode        = htons(opcode);
   11.11      varph->vnet              = htonl(vnet);
   11.12      varph->vmac              = *vmac;
   11.13      varph->addr              = saddr;
   11.14 @@ -1076,9 +1076,9 @@ int varp_handle_message(struct sk_buff *
   11.15          goto exit;
   11.16      }
   11.17      mine = 1;
   11.18 -    if(varph->id != htons(VARP_ID)){
   11.19 +    if(varph->hdr.id != htons(VARP_ID)){
   11.20          // It's not varp at all - ignore it.
   11.21 -        wprintf("> Unknown id: %d \n", ntohs(varph->id));
   11.22 +        wprintf("> Unknown id: %d \n", ntohs(varph->hdr.id));
   11.23          goto exit;
   11.24      }
   11.25      if(1){
   11.26 @@ -1086,13 +1086,13 @@ int varp_handle_message(struct sk_buff *
   11.27                  NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr));
   11.28          dprintf("> sport=%u dport=%u\n", ntohs(skb->h.uh->source), ntohs(skb->h.uh->dest));
   11.29          dprintf("> opcode=%d vnet=%u vmac=" MACFMT " addr=" IPFMT "\n",
   11.30 -                ntohs(varph->opcode),
   11.31 +                ntohs(varph->hdr.opcode),
   11.32                  ntohl(varph->vnet),
   11.33                  MAC6TUPLE(varph->vmac.mac),
   11.34                  NIPQUAD(varph->addr));
   11.35          varp_dprint();
   11.36      }
   11.37 -    switch(ntohs(varph->opcode)){
   11.38 +    switch(ntohs(varph->hdr.opcode)){
   11.39      case VARP_OP_REQUEST:
   11.40          err = varp_handle_request(skb, varph);
   11.41          break;
   11.42 @@ -1100,7 +1100,7 @@ int varp_handle_message(struct sk_buff *
   11.43          err = varp_handle_announce(skb, varph);
   11.44          break;
   11.45      default:
   11.46 -        wprintf("> Unknown opcode: %d \n", ntohs(varph->opcode));
   11.47 +        wprintf("> Unknown opcode: %d \n", ntohs(varph->hdr.opcode));
   11.48         break;
   11.49      }
   11.50    exit:
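
The if_varp.h and varp.c hunks above are two halves of the same fix: the unnamed "VnetMsgHdr;" member inside struct VarpHdr is not portable C (an anonymous member declared via a typedef name generally needs GCC's -fms-extensions), so the member is given the name hdr and every access changes from varph->id to varph->hdr.id. A small self-contained C sketch of the new layout, with simplified stand-in types (the real Vmac field is omitted for brevity):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct VnetMsgHdr {
        uint16_t id;
        uint16_t opcode;
    } VnetMsgHdr;

    typedef struct VarpHdr {
        VnetMsgHdr hdr;          /* was an unnamed "VnetMsgHdr;" member */
        uint32_t   vnet;
        uint32_t   addr;
    } VarpHdr;

    int main(void)
    {
        VarpHdr v = { .hdr = { .id = 1, .opcode = 2 }, .vnet = 3, .addr = 4 };
        /* fields that used to be read as v.id / v.opcode now go through .hdr */
        printf("id=%u opcode=%u vnet=%u\n",
               (unsigned)v.hdr.id, (unsigned)v.hdr.opcode, (unsigned)v.vnet);
        return 0;
    }
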
    12.1 --- a/xen/arch/x86/domain.c	Sat Feb 12 16:34:28 2005 +0000
    12.2 +++ b/xen/arch/x86/domain.c	Sat Feb 12 17:26:12 2005 +0000
    12.3 @@ -783,6 +783,7 @@ void domain_relinquish_memory(struct dom
    12.4          if ( pagetable_val(ed->arch.pagetable) != 0 )
    12.5              put_page_and_type(&frame_table[pagetable_val(ed->arch.pagetable) >>
    12.6                                             PAGE_SHIFT]);
    12.7 +        ed->arch.pagetable = mk_pagetable(0);
    12.8      }
    12.9  
   12.10  #ifdef CONFIG_VMX
    13.1 --- a/xen/arch/x86/extable.c	Sat Feb 12 16:34:28 2005 +0000
    13.2 +++ b/xen/arch/x86/extable.c	Sat Feb 12 17:26:12 2005 +0000
    13.3 @@ -63,8 +63,9 @@ search_exception_table(unsigned long add
    13.4  }
    13.5  
    13.6  unsigned long
    13.7 -search_pre_exception_table(unsigned long addr)
    13.8 +search_pre_exception_table(struct xen_regs *regs)
    13.9  {
   13.10 +    unsigned long addr = (unsigned long)regs->eip;
   13.11      unsigned long fixup = search_one_table(
   13.12          __start___pre_ex_table, __stop___pre_ex_table-1, addr);
   13.13      DPRINTK("Pre-exception: %p -> %p\n", addr, fixup);
    14.1 --- a/xen/arch/x86/mm.c	Sat Feb 12 16:34:28 2005 +0000
    14.2 +++ b/xen/arch/x86/mm.c	Sat Feb 12 17:26:12 2005 +0000
    14.3 @@ -1594,10 +1594,26 @@ int do_update_va_mapping(unsigned long v
    14.4              &shadow_linear_pg_table[l1_linear_offset(va)])))) )
    14.5          {
    14.6              /*
    14.7 -             * Since L2's are guranteed RW, failure indicates the page was not 
    14.8 -             * shadowed, so ignore.
    14.9 +             * Since L2's are guaranteed RW, failure indicates either that the
   14.10 +             * page was not shadowed, or that the L2 entry has not yet been
   14.11 +             * updated to reflect the shadow.
   14.12               */
   14.13 -            perfc_incrc(shadow_update_va_fail);
   14.14 +            unsigned l2_idx = page_nr >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT);
   14.15 +            l2_pgentry_t gpde = linear_l2_table[l2_idx];
   14.16 +            unsigned long gpfn = l2_pgentry_val(gpde) >> PAGE_SHIFT;
   14.17 +
   14.18 +            if (get_shadow_status(&d->mm, gpfn))
   14.19 +            {
   14.20 +                unsigned long *gl1e = map_domain_mem(gpfn << PAGE_SHIFT);
   14.21 +                unsigned l1_idx = page_nr & (ENTRIES_PER_L1_PAGETABLE - 1);
   14.22 +                gl1e[l1_idx] = sval;
   14.23 +                unmap_domain_mem(gl1e);
   14.24 +                put_shadow_status(&d->mm);
   14.25 +
   14.26 +                perfc_incrc(shadow_update_va_fail1);
   14.27 +            }
   14.28 +            else
   14.29 +                perfc_incrc(shadow_update_va_fail2);
   14.30          }
   14.31  
   14.32          /*
   14.33 @@ -2259,7 +2275,7 @@ void audit_domain(struct domain *d)
   14.34              scan_for_pfn( e, xpfn );            
   14.35      }   
   14.36  
   14.37 -    int i;
   14.38 +    int i, l1, l2;
   14.39      unsigned long pfn;
   14.40      struct list_head *list_ent;
   14.41      struct pfn_info *page;
   14.42 @@ -2318,8 +2334,8 @@ void audit_domain(struct domain *d)
   14.43  
   14.44  
   14.45      /* PHASE 1 */
   14.46 -
   14.47 -    adjust(&frame_table[pagetable_val(d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT], -1, 1);
   14.48 +    if( pagetable_val(d->exec_domain[0]->arch.pagetable) )
   14.49 +	adjust(&frame_table[pagetable_val(d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT], -1, 1);
   14.50  
   14.51      list_ent = d->page_list.next;
   14.52      for ( i = 0; (list_ent != &d->page_list); i++ )
   14.53 @@ -2500,6 +2516,7 @@ void audit_domain(struct domain *d)
   14.54  
   14.55      /* PHASE 3 */
   14.56      list_ent = d->page_list.next;
   14.57 +    l1 = l2 = 0;
   14.58      for ( i = 0; (list_ent != &d->page_list); i++ )
   14.59      {
   14.60          unsigned long *pt;
   14.61 @@ -2509,6 +2526,7 @@ void audit_domain(struct domain *d)
   14.62          switch ( page->u.inuse.type_info & PGT_type_mask )
   14.63          {
   14.64          case PGT_l2_page_table:
   14.65 +	    l2++;
   14.66              if ( (page->u.inuse.type_info & PGT_pinned) == PGT_pinned )
   14.67                  adjust( page, 1, 1 );          
   14.68  
   14.69 @@ -2535,6 +2553,7 @@ void audit_domain(struct domain *d)
   14.70              break;
   14.71  
   14.72          case PGT_l1_page_table:
   14.73 +	    l1++;
   14.74              if ( (page->u.inuse.type_info & PGT_pinned) == PGT_pinned )
   14.75                  adjust( page, 1, 1 );
   14.76  
   14.77 @@ -2572,10 +2591,10 @@ void audit_domain(struct domain *d)
   14.78  
   14.79      spin_unlock(&d->page_alloc_lock);
   14.80  
   14.81 -    adjust(&frame_table[pagetable_val(
   14.82 -        d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT], 1, 1);
   14.83 -
   14.84 -    printk("Audit %d: Done. ctot=%d ttot=%d\n", d->id, ctot, ttot );
   14.85 +    if( pagetable_val(d->exec_domain[0]->arch.pagetable) )
   14.86 +	adjust(&frame_table[pagetable_val(d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT], 1, 1);
   14.87 +
   14.88 +    printk("Audit %d: Done. pages=%d l1=%d l2=%d ctot=%d ttot=%d\n", d->id, i, l1, l2, ctot, ttot );
   14.89  
   14.90      if ( d != current->domain )
   14.91          domain_unpause(d);
    16.1 --- a/xen/arch/x86/x86_32/entry.S	Sat Feb 12 16:34:28 2005 +0000
    16.2 +++ b/xen/arch/x86/x86_32/entry.S	Sat Feb 12 17:26:12 2005 +0000
    16.3 @@ -487,7 +487,7 @@ exception_with_ints_disabled:
    16.4          testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
    16.5          jnz   1b                        # it really does happen!
    16.6                                          #  (e.g., DOM0 X server)
    16.7 -        pushl XREGS_eip(%esp)
    16.8 +        pushl %esp
    16.9          call  search_pre_exception_table
   16.10          addl  $4,%esp
   16.11          testl %eax,%eax                 # no fixup code for faulting EIP?
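
The extable.c and entry.S hunks are two sides of one interface change: search_pre_exception_table() now takes the saved register frame and extracts the faulting EIP itself, so the assembly pushes %esp (which points at that frame here) instead of the saved XREGS_eip slot. A rough, self-contained C model of the old and new call shapes, using a hypothetical stand-in for struct xen_regs rather than the real Xen headers:

    #include <stdio.h>

    /* hypothetical stand-in for the saved register frame */
    struct xen_regs { unsigned long eip; /* ...other saved registers... */ };

    /* old shape: the caller passed the faulting address directly */
    static unsigned long search_old(unsigned long addr)
    {
        return (addr == 0x1000) ? 0x2000 : 0;   /* pretend fixup-table lookup */
    }

    /* new shape: the helper receives the frame and reads regs->eip itself */
    static unsigned long search_new(struct xen_regs *regs)
    {
        return search_old(regs->eip);
    }

    int main(void)
    {
        struct xen_regs r = { .eip = 0x1000 };
        printf("old: %#lx  new: %#lx\n", search_old(r.eip), search_new(&r));
        return 0;
    }
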
    18.1 --- a/xen/include/xen/perfc_defn.h	Sat Feb 12 16:34:28 2005 +0000
    18.2 +++ b/xen/include/xen/perfc_defn.h	Sat Feb 12 17:26:12 2005 +0000
    18.3 @@ -25,7 +25,8 @@ PERFCOUNTER_CPU( shadow_l2_table_count, 
    18.4  PERFCOUNTER_CPU( shadow_l1_table_count, "shadow_l1_table count" )
    18.5  PERFCOUNTER_CPU( unshadow_table_count, "unshadow_table count" )
    18.6  PERFCOUNTER_CPU( shadow_fixup_count, "shadow_fixup count" )
    18.7 -PERFCOUNTER_CPU( shadow_update_va_fail, "shadow_update_va_fail" )
    18.8 +PERFCOUNTER_CPU( shadow_update_va_fail1, "shadow_update_va_fail1" )
    18.9 +PERFCOUNTER_CPU( shadow_update_va_fail2, "shadow_update_va_fail2" )
   18.10  
   18.11  /* STATUS counters do not reset when 'P' is hit */
   18.12  PERFSTATUS( shadow_l2_pages, "current # shadow L2 pages" )