ia64/xen-unstable

changeset 3760:736089c11af9

bitkeeper revision 1.1159.1.563 (420ac6a4CzlgvLZ7nmENZh1uJh51Ng)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-2.0-testing.bk
into freefall.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
author iap10@freefall.cl.cam.ac.uk
date Thu Feb 10 02:27:48 2005 +0000 (2005-02-10)
parents bb187d778f52 b5019559f1ca
children 118e0a3af9b0
files .rootkeys linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c tools/vnet/INSTALL tools/vnet/Makefile tools/vnet/vnet-module/Makefile-2.6 tools/vnet/vnet-module/if_varp.h tools/vnet/vnet-module/varp.c tools/vnet/vnetd/vcache.c
line diff
     1.1 --- a/.rootkeys	Wed Feb 09 18:57:38 2005 +0000
     1.2 +++ b/.rootkeys	Thu Feb 10 02:27:48 2005 +0000
     1.3 @@ -769,6 +769,7 @@ 40fcefb3yMSrZvApO9ToIi-iQwnchA tools/sv/
     1.4  41013a83z27rKvWIxAfUBMVZ1eDCDg tools/sv/inc/script.js
     1.5  40fcefb3zGC9XNBkSwTEobCoq8YClA tools/sv/inc/style.css
     1.6  41a21888_WlknVWjSxb32Fo13_ujsw tools/vnet/00README
     1.7 +420a9b706I-bN_uPdiy0m3rmDifNNg tools/vnet/INSTALL
     1.8  41a21888bOiOJc7blzRbe4MNJoaYTw tools/vnet/Makefile
     1.9  41a21888mg2k5HeiVjlQYEtJBZT4Qg tools/vnet/doc/vnet-module.txt
    1.10  41a21888cuxfT8wjCdRR6V1lqf5NtA tools/vnet/doc/vnet-xend.txt
     2.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c	Wed Feb 09 18:57:38 2005 +0000
     2.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c	Thu Feb 10 02:27:48 2005 +0000
     2.3 @@ -41,6 +41,8 @@
     2.4  #include <linux/bitops.h>
     2.5  #include <net/sock.h>
     2.6  #include <net/pkt_sched.h>
     2.7 +#include <net/arp.h>
     2.8 +#include <net/route.h>
     2.9  #include <asm/io.h>
    2.10  #include <asm-xen/evtchn.h>
    2.11  #include <asm-xen/ctrl_if.h>
    2.12 @@ -48,11 +50,6 @@
    2.13  #include <asm-xen/balloon.h>
    2.14  #include <asm/page.h>
    2.15  
    2.16 -#include <net/arp.h>
    2.17 -#include <net/route.h>
    2.18 -
    2.19 -#define DEBUG 0
    2.20 -
    2.21  #ifndef __GFP_NOWARN
    2.22  #define __GFP_NOWARN 0
    2.23  #endif
    2.24 @@ -63,7 +60,7 @@
    2.25          atomic_set(&(skb_shinfo(_skb)->dataref), 1);  \
    2.26          skb_shinfo(_skb)->nr_frags = 0;               \
    2.27          skb_shinfo(_skb)->frag_list = NULL;           \
    2.28 -    } while ( 0 )
    2.29 +    } while (0)
    2.30  
    2.31  /* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
    2.32  #define RX_HEADROOM 200
    2.33 @@ -171,10 +168,9 @@ static struct net_device *find_dev_by_ha
    2.34  {
    2.35      struct list_head *ent;
    2.36      struct net_private *np;
    2.37 -    list_for_each ( ent, &dev_list )
    2.38 -    {
    2.39 +    list_for_each (ent, &dev_list) {
    2.40          np = list_entry(ent, struct net_private, list);
    2.41 -        if ( np->handle == handle )
    2.42 +        if (np->handle == handle)
    2.43              return np->dev;
    2.44      }
    2.45      return NULL;
    2.46 @@ -203,7 +199,7 @@ static void netctrl_init(void)
    2.47   */
    2.48  static int netctrl_err(int err)
    2.49  {
    2.50 -    if ( (err < 0) && !netctrl.err )
    2.51 +    if ((err < 0) && !netctrl.err)
    2.52          netctrl.err = err;
    2.53      return netctrl.err;
    2.54  }
    2.55 @@ -216,9 +212,9 @@ static int netctrl_connected(void)
    2.56  {
    2.57      int ok;
    2.58  
    2.59 -    if ( netctrl.err )
    2.60 +    if (netctrl.err)
    2.61          ok = netctrl.err;
    2.62 -    else if ( netctrl.up == NETIF_DRIVER_STATUS_UP )
    2.63 +    else if (netctrl.up == NETIF_DRIVER_STATUS_UP)
    2.64          ok = (netctrl.connected_n == netctrl.interface_n);
    2.65      else
    2.66          ok = 0;
    2.67 @@ -266,14 +262,14 @@ static int send_fake_arp(struct net_devi
    2.68      src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
    2.69  
    2.70      /* No IP? Then nothing to do. */
    2.71 -    if ( src_ip == 0 )
    2.72 +    if (src_ip == 0)
    2.73          return 0;
    2.74  
    2.75      skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
    2.76                       dst_ip, dev, src_ip,
    2.77                       /*dst_hw*/ NULL, /*src_hw*/ NULL, 
    2.78                       /*target_hw*/ dev->dev_addr);
    2.79 -    if ( skb == NULL )
    2.80 +    if (skb == NULL)
    2.81          return -ENOMEM;
    2.82  
    2.83      return dev_queue_xmit(skb);
    2.84 @@ -302,15 +298,14 @@ static void network_tx_buf_gc(struct net
    2.85      struct net_private *np = netdev_priv(dev);
    2.86      struct sk_buff *skb;
    2.87  
    2.88 -    if ( np->backend_state != BEST_CONNECTED )
    2.89 +    if (np->backend_state != BEST_CONNECTED)
    2.90          return;
    2.91  
    2.92      do {
    2.93          prod = np->tx->resp_prod;
    2.94          rmb(); /* Ensure we see responses up to 'rp'. */
    2.95  
    2.96 -        for ( i = np->tx_resp_cons; i != prod; i++ )
    2.97 -        {
    2.98 +        for (i = np->tx_resp_cons; i != prod; i++) {
    2.99              id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
   2.100              skb = np->tx_skbs[id];
   2.101              ADD_ID_TO_FREELIST(np->tx_skbs, id);
   2.102 @@ -330,14 +325,11 @@ static void network_tx_buf_gc(struct net
   2.103          np->tx->event = 
   2.104              prod + ((np->tx->req_prod - prod) >> 1) + 1;
   2.105          mb();
   2.106 -    }
   2.107 -    while ( prod != np->tx->resp_prod );
   2.108 +    } while (prod != np->tx->resp_prod);
   2.109  
   2.110 -    if ( np->tx_full && 
   2.111 -         ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE) )
   2.112 -    {
   2.113 +    if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
   2.114          np->tx_full = 0;
   2.115 -        if ( np->user_state == UST_OPEN )
   2.116 +        if (np->user_state == UST_OPEN)
   2.117              netif_wake_queue(dev);
   2.118      }
   2.119  }
   2.120 @@ -351,7 +343,7 @@ static void network_alloc_rx_buffers(str
   2.121      int i, batch_target;
   2.122      NETIF_RING_IDX req_prod = np->rx->req_prod;
   2.123  
   2.124 -    if ( unlikely(np->backend_state != BEST_CONNECTED) )
   2.125 +    if (unlikely(np->backend_state != BEST_CONNECTED))
   2.126          return;
   2.127  
   2.128      /*
   2.129 @@ -361,20 +353,18 @@ static void network_alloc_rx_buffers(str
   2.130       * ourself and for other kernel subsystems.
   2.131       */
   2.132      batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
   2.133 -    for ( i = skb_queue_len(&np->rx_batch); i < batch_target; i++ )
   2.134 -    {
   2.135 -        if ( unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL) )
   2.136 +    for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
   2.137 +        if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
   2.138              break;
   2.139          __skb_queue_tail(&np->rx_batch, skb);
   2.140      }
   2.141  
   2.142      /* Is the batch large enough to be worthwhile? */
   2.143 -    if ( i < (np->rx_target/2)  )
   2.144 +    if (i < (np->rx_target/2))
   2.145          return;
   2.146  
   2.147 -    for ( i = 0; ; i++ )
   2.148 -    {
   2.149 -        if ( (skb = __skb_dequeue(&np->rx_batch)) == NULL )
   2.150 +    for (i = 0; ; i++) {
   2.151 +        if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
   2.152              break;
   2.153  
   2.154          skb->dev = dev;
   2.155 @@ -421,15 +411,15 @@ static void network_alloc_rx_buffers(str
   2.156      (void)HYPERVISOR_multicall(rx_mcl, i+1);
   2.157  
   2.158      /* Check return status of HYPERVISOR_dom_mem_op(). */
   2.159 -    if ( unlikely(rx_mcl[i].args[5] != i) )
   2.160 +    if (unlikely(rx_mcl[i].args[5] != i))
   2.161          panic("Unable to reduce memory reservation\n");
   2.162  
   2.163      /* Above is a suitable barrier to ensure backend will see requests. */
   2.164      np->rx->req_prod = req_prod + i;
   2.165  
   2.166      /* Adjust our floating fill target if we risked running out of buffers. */
   2.167 -    if ( ((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
   2.168 -         ((np->rx_target *= 2) > RX_MAX_TARGET) )
   2.169 +    if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
   2.170 +         ((np->rx_target *= 2) > RX_MAX_TARGET))
   2.171          np->rx_target = RX_MAX_TARGET;
   2.172  }
   2.173  
   2.174 @@ -441,18 +431,16 @@ static int network_start_xmit(struct sk_
   2.175      netif_tx_request_t *tx;
   2.176      NETIF_RING_IDX i;
   2.177  
   2.178 -    if ( unlikely(np->tx_full) )
   2.179 -    {
   2.180 +    if (unlikely(np->tx_full)) {
   2.181          printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
   2.182          netif_stop_queue(dev);
   2.183          goto drop;
   2.184      }
   2.185  
   2.186 -    if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   2.187 -                  PAGE_SIZE) )
   2.188 -    {
   2.189 +    if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   2.190 +                  PAGE_SIZE)) {
   2.191          struct sk_buff *nskb;
   2.192 -        if ( unlikely((nskb = alloc_xen_skb(skb->len)) == NULL) )
   2.193 +        if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
   2.194              goto drop;
   2.195          skb_put(nskb, skb->len);
   2.196          memcpy(nskb->data, skb->data, skb->len);
   2.197 @@ -463,8 +451,7 @@ static int network_start_xmit(struct sk_
   2.198      
   2.199      spin_lock_irq(&np->tx_lock);
   2.200  
   2.201 -    if ( np->backend_state != BEST_CONNECTED )
   2.202 -    {
   2.203 +    if (np->backend_state != BEST_CONNECTED) {
   2.204          spin_unlock_irq(&np->tx_lock);
   2.205          goto drop;
   2.206      }
   2.207 @@ -485,8 +472,7 @@ static int network_start_xmit(struct sk_
   2.208  
   2.209      network_tx_buf_gc(dev);
   2.210  
   2.211 -    if ( (i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1) )
   2.212 -    {
   2.213 +    if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
   2.214          np->tx_full = 1;
   2.215          netif_stop_queue(dev);
   2.216      }
   2.217 @@ -498,7 +484,7 @@ static int network_start_xmit(struct sk_
   2.218  
   2.219      /* Only notify Xen if we really have to. */
   2.220      mb();
   2.221 -    if ( np->tx->TX_TEST_IDX == i )
   2.222 +    if (np->tx->TX_TEST_IDX == i)
   2.223          notify_via_evtchn(np->evtchn);
   2.224  
   2.225      return 0;
   2.226 @@ -509,7 +495,6 @@ static int network_start_xmit(struct sk_
   2.227      return 0;
   2.228  }
   2.229  
   2.230 -
   2.231  static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
   2.232  {
   2.233      struct net_device *dev = dev_id;
   2.234 @@ -520,8 +505,7 @@ static irqreturn_t netif_int(int irq, vo
   2.235      network_tx_buf_gc(dev);
   2.236      spin_unlock_irqrestore(&np->tx_lock, flags);
   2.237  
   2.238 -    if ( (np->rx_resp_cons != np->rx->resp_prod) &&
   2.239 -         (np->user_state == UST_OPEN) )
   2.240 +    if ((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
   2.241          netif_rx_schedule(dev);
   2.242  
   2.243      return IRQ_HANDLED;
   2.244 @@ -542,33 +526,30 @@ static int netif_poll(struct net_device 
   2.245  
   2.246      spin_lock(&np->rx_lock);
   2.247  
   2.248 -    if ( np->backend_state != BEST_CONNECTED )
   2.249 -    {
   2.250 +    if (np->backend_state != BEST_CONNECTED) {
   2.251          spin_unlock(&np->rx_lock);
   2.252          return 0;
   2.253      }
   2.254  
   2.255      skb_queue_head_init(&rxq);
   2.256  
   2.257 -    if ( (budget = *pbudget) > dev->quota )
   2.258 +    if ((budget = *pbudget) > dev->quota)
   2.259          budget = dev->quota;
   2.260  
   2.261      rp = np->rx->resp_prod;
   2.262      rmb(); /* Ensure we see queued responses up to 'rp'. */
   2.263  
   2.264 -    for ( i = np->rx_resp_cons, work_done = 0; 
   2.265 -          (i != rp) && (work_done < budget); 
   2.266 -          i++, work_done++ )
   2.267 -    {
   2.268 +    for (i = np->rx_resp_cons, work_done = 0; 
   2.269 +		    (i != rp) && (work_done < budget);
   2.270 +		    i++, work_done++) {
   2.271          rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
   2.272  
   2.273          /*
   2.274           * An error here is very odd. Usually indicates a backend bug,
   2.275           * low-memory condition, or that we didn't have reservation headroom.
   2.276           */
   2.277 -        if ( unlikely(rx->status <= 0) )
   2.278 -        {
   2.279 -            if ( net_ratelimit() )
   2.280 +        if (unlikely(rx->status <= 0)) {
   2.281 +            if (net_ratelimit())
   2.282                  printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
   2.283              np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
   2.284              wmb();
   2.285 @@ -608,8 +589,7 @@ static int netif_poll(struct net_device 
   2.286      balloon_update_driver_allowance(-work_done);
   2.287  
   2.288      /* Do all the remapping work, and M->P updates, in one big hypercall. */
   2.289 -    if ( likely((mcl - rx_mcl) != 0) )
   2.290 -    {
   2.291 +    if (likely((mcl - rx_mcl) != 0)) {
   2.292          mcl->op = __HYPERVISOR_mmu_update;
   2.293          mcl->args[0] = (unsigned long)rx_mmu;
   2.294          mcl->args[1] = mmu - rx_mmu;
   2.295 @@ -618,33 +598,29 @@ static int netif_poll(struct net_device 
   2.296          (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
   2.297      }
   2.298  
   2.299 -    while ( (skb = __skb_dequeue(&rxq)) != NULL )
   2.300 -    {
   2.301 +    while ((skb = __skb_dequeue(&rxq)) != NULL) {
   2.302          /*
   2.303           * Enough room in skbuff for the data we were passed? Also, Linux 
   2.304           * expects at least 16 bytes headroom in each receive buffer.
   2.305           */
   2.306 -        if ( unlikely(skb->tail > skb->end) ||
   2.307 -             unlikely((skb->data - skb->head) < 16) )
   2.308 -        {
   2.309 +        if (unlikely(skb->tail > skb->end) || 
   2.310 +			unlikely((skb->data - skb->head) < 16)) {
   2.311              nskb = NULL;
   2.312  
   2.313              /* Only copy the packet if it fits in the current MTU. */
   2.314 -            if ( skb->len <= (dev->mtu + ETH_HLEN) )
   2.315 -            {
   2.316 -                if ( (skb->tail > skb->end) && net_ratelimit() )
   2.317 +            if (skb->len <= (dev->mtu + ETH_HLEN)) {
   2.318 +                if ((skb->tail > skb->end) && net_ratelimit())
   2.319                      printk(KERN_INFO "Received packet needs %d bytes more "
   2.320                             "headroom.\n", skb->tail - skb->end);
   2.321  
   2.322 -                if ( (nskb = alloc_xen_skb(skb->len + 2)) != NULL )
   2.323 -                {
   2.324 +                if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
   2.325                      skb_reserve(nskb, 2);
   2.326                      skb_put(nskb, skb->len);
   2.327                      memcpy(nskb->data, skb->data, skb->len);
   2.328                      nskb->dev = skb->dev;
   2.329                  }
   2.330              }
   2.331 -            else if ( net_ratelimit() )
   2.332 +            else if (net_ratelimit())
   2.333                  printk(KERN_INFO "Received packet too big for MTU "
   2.334                         "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
   2.335  
   2.336 @@ -655,7 +631,7 @@ static int netif_poll(struct net_device 
   2.337              dev_kfree_skb(skb);
   2.338  
   2.339              /* Switch old for new, if we copied the buffer. */
   2.340 -            if ( (skb = nskb) == NULL )
   2.341 +            if ((skb = nskb) == NULL)
   2.342                  continue;
   2.343          }
   2.344          
   2.345 @@ -674,8 +650,8 @@ static int netif_poll(struct net_device 
   2.346  
   2.347      /* If we get a callback with very few responses, reduce fill target. */
   2.348      /* NB. Note exponential increase, linear decrease. */
   2.349 -    if ( ((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
   2.350 -         (--np->rx_target < RX_MIN_TARGET) )
   2.351 +    if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
   2.352 +         (--np->rx_target < RX_MIN_TARGET))
   2.353          np->rx_target = RX_MIN_TARGET;
   2.354  
   2.355      network_alloc_rx_buffers(dev);
   2.356 @@ -683,16 +659,14 @@ static int netif_poll(struct net_device 
   2.357      *pbudget   -= work_done;
   2.358      dev->quota -= work_done;
   2.359  
   2.360 -    if ( work_done < budget )
   2.361 -    {
   2.362 +    if (work_done < budget) {
   2.363          local_irq_save(flags);
   2.364  
   2.365          np->rx->event = i + 1;
   2.366      
   2.367          /* Deal with hypervisor racing our resetting of rx_event. */
   2.368          mb();
   2.369 -        if ( np->rx->resp_prod == i )
   2.370 -        {
   2.371 +        if (np->rx->resp_prod == i) {
   2.372              __netif_rx_complete(dev);
   2.373              more_to_do = 0;
   2.374          }
   2.375 @@ -755,10 +729,8 @@ static void network_connect(struct net_d
   2.376       * to avoid this but maybe it doesn't matter so much given the
   2.377       * interface has been down.
   2.378       */
   2.379 -    for ( requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++ )
   2.380 -    {
   2.381 -            if ( (unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET )
   2.382 -            {
   2.383 +    for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
   2.384 +            if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
   2.385                  struct sk_buff *skb = np->tx_skbs[i];
   2.386                  
   2.387                  tx = &np->tx->ring[requeue_idx++].req;
   2.388 @@ -775,8 +747,8 @@ static void network_connect(struct net_d
   2.389      np->tx->req_prod = requeue_idx;
   2.390  
   2.391      /* Rebuild the RX buffer freelist and the RX ring itself. */
   2.392 -    for ( requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++ )
   2.393 -        if ( (unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET )
   2.394 +    for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++)
   2.395 +        if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET)
   2.396              np->rx->ring[requeue_idx++].req.id = i;
   2.397      wmb();                
   2.398      np->rx->req_prod = requeue_idx;
   2.399 @@ -791,7 +763,7 @@ static void network_connect(struct net_d
   2.400      notify_via_evtchn(status->evtchn);  
   2.401      network_tx_buf_gc(dev);
   2.402  
   2.403 -    if ( np->user_state == UST_OPEN )
   2.404 +    if (np->user_state == UST_OPEN)
   2.405          netif_start_queue(dev);
   2.406  
   2.407      spin_unlock(&np->rx_lock);
   2.408 @@ -917,9 +889,7 @@ static void vif_disconnect(struct net_pr
   2.409   * is initiated by a special "RESET" message - disconnect could
   2.410   * just mean we're not allowed to use this interface any more.
   2.411   */
   2.412 -static void 
   2.413 -vif_reset(
   2.414 -    struct net_private *np)
   2.415 +static void vif_reset(struct net_private *np)
   2.416  {
   2.417      IPRINTK("Attempting to reconnect network interface: handle=%u\n",
   2.418              np->handle);    
   2.419 @@ -932,9 +902,8 @@ vif_reset(
   2.420   * Sets the mac and event channel from the message.
   2.421   * Binds the irq to the event channel.
   2.422   */
   2.423 -static void
   2.424 -vif_connect(
   2.425 -    struct net_private *np, netif_fe_interface_status_t *status)
   2.426 +static void 
   2.427 +vif_connect(struct net_private *np, netif_fe_interface_status_t *status)
   2.428  {
   2.429      struct net_device *dev = np->dev;
   2.430      memcpy(dev->dev_addr, status->mac, ETH_ALEN);
   2.431 @@ -959,8 +928,7 @@ static int create_netdev(int handle, str
   2.432      struct net_device *dev = NULL;
   2.433      struct net_private *np = NULL;
   2.434  
   2.435 -    if ( (dev = alloc_etherdev(sizeof(struct net_private))) == NULL )
   2.436 -    {
   2.437 +    if ((dev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
   2.438          printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
   2.439          err = -ENOMEM;
   2.440          goto exit;
   2.441 @@ -978,9 +946,9 @@ static int create_netdev(int handle, str
   2.442      np->rx_target = RX_MIN_TARGET;
   2.443  
   2.444      /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
   2.445 -    for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )
   2.446 +    for (i = 0; i <= NETIF_TX_RING_SIZE; i++)
   2.447          np->tx_skbs[i] = (void *)(i+1);
   2.448 -    for ( i = 0; i <= NETIF_RX_RING_SIZE; i++ )
   2.449 +    for (i = 0; i <= NETIF_RX_RING_SIZE; i++)
   2.450          np->rx_skbs[i] = (void *)(i+1);
   2.451  
   2.452      dev->open            = network_open;
   2.453 @@ -990,8 +958,7 @@ static int create_netdev(int handle, str
   2.454      dev->poll            = netif_poll;
   2.455      dev->weight          = 64;
   2.456      
   2.457 -    if ( (err = register_netdev(dev)) != 0 )
   2.458 -    {
   2.459 +    if ((err = register_netdev(dev)) != 0) {
   2.460          printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
   2.461          goto exit;
   2.462      }
   2.463 @@ -999,9 +966,9 @@ static int create_netdev(int handle, str
   2.464      list_add(&np->list, &dev_list);
   2.465  
   2.466    exit:
   2.467 -    if ( (err != 0) && (dev != NULL ) )
   2.468 +    if ((err != 0) && (dev != NULL ))
   2.469          kfree(dev);
   2.470 -    else if ( val != NULL )
   2.471 +    else if (val != NULL)
   2.472          *val = dev;
   2.473      return err;
   2.474  }
   2.475 @@ -1015,36 +982,34 @@ static int create_netdev(int handle, str
   2.476   * @return 0 on success, error code otherwise
   2.477   */
   2.478  static int 
   2.479 -target_vif(
   2.480 -    netif_fe_interface_status_t *status, struct net_private **np)
   2.481 +target_vif(netif_fe_interface_status_t *status, struct net_private **np)
   2.482  {
   2.483      int err = 0;
   2.484      struct net_device *dev;
   2.485  
   2.486      DPRINTK("> handle=%d\n", status->handle);
   2.487 -    if ( status->handle < 0 )
   2.488 -    {
   2.489 +    if (status->handle < 0) {
   2.490          err = -EINVAL;
   2.491          goto exit;
   2.492      }
   2.493  
   2.494 -    if ( (dev = find_dev_by_handle(status->handle)) != NULL )
   2.495 +    if ((dev = find_dev_by_handle(status->handle)) != NULL)
   2.496          goto exit;
   2.497  
   2.498 -    if ( status->status == NETIF_INTERFACE_STATUS_CLOSED )
   2.499 +    if (status->status == NETIF_INTERFACE_STATUS_CLOSED)
   2.500          goto exit;
   2.501 -    if ( status->status == NETIF_INTERFACE_STATUS_CHANGED )
   2.502 +    if (status->status == NETIF_INTERFACE_STATUS_CHANGED)
   2.503          goto exit;
   2.504  
   2.505      /* It's a new interface in a good state - create it. */
   2.506      DPRINTK("> create device...\n");
   2.507 -    if ( (err = create_netdev(status->handle, &dev)) != 0 )
   2.508 +    if ((err = create_netdev(status->handle, &dev)) != 0)
   2.509          goto exit;
   2.510  
   2.511      netctrl.interface_n++;
   2.512  
   2.513    exit:
   2.514 -    if ( np != NULL )
   2.515 +    if (np != NULL)
   2.516          *np = ((dev && !err) ? netdev_priv(dev) : NULL);
   2.517      DPRINTK("< err=%d\n", err);
   2.518      return err;
   2.519 @@ -1059,23 +1024,19 @@ static void netif_interface_status(netif
   2.520      DPRINTK("> status=%s handle=%d\n",
   2.521              status_name[status->status], status->handle);
   2.522  
   2.523 -    if ( (err = target_vif(status, &np)) != 0 )
   2.524 -    {
   2.525 +    if ((err = target_vif(status, &np)) != 0) {
   2.526          WPRINTK("Invalid netif: handle=%u\n", status->handle);
   2.527          return;
   2.528      }
   2.529  
   2.530 -    if ( np == NULL )
   2.531 -    {
   2.532 +    if (np == NULL) {
   2.533          DPRINTK("> no vif\n");
   2.534          return;
   2.535      }
   2.536  
   2.537 -    switch ( status->status )
   2.538 -    {
   2.539 +    switch (status->status) {
   2.540      case NETIF_INTERFACE_STATUS_CLOSED:
   2.541 -        switch ( np->backend_state )
   2.542 -        {
   2.543 +        switch (np->backend_state) {
   2.544          case BEST_CLOSED:
   2.545          case BEST_DISCONNECTED:
   2.546          case BEST_CONNECTED:
   2.547 @@ -1085,8 +1046,7 @@ static void netif_interface_status(netif
   2.548          break;
   2.549  
   2.550      case NETIF_INTERFACE_STATUS_DISCONNECTED:
   2.551 -        switch ( np->backend_state )
   2.552 -        {
   2.553 +        switch (np->backend_state) {
   2.554          case BEST_CLOSED:
   2.555              vif_disconnect(np);
   2.556              break;
   2.557 @@ -1098,8 +1058,7 @@ static void netif_interface_status(netif
   2.558          break;
   2.559  
   2.560      case NETIF_INTERFACE_STATUS_CONNECTED:
   2.561 -        switch ( np->backend_state )
   2.562 -        {
   2.563 +        switch (np->backend_state) {
   2.564          case BEST_CLOSED:
   2.565              WPRINTK("Unexpected netif status %s in state %s\n",
   2.566                      status_name[status->status],
   2.567 @@ -1141,20 +1100,17 @@ static void netif_driver_status(netif_fe
   2.568  static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
   2.569  {
   2.570  
   2.571 -    switch ( msg->subtype )
   2.572 -    {
   2.573 +    switch (msg->subtype) {
   2.574      case CMSG_NETIF_FE_INTERFACE_STATUS:
   2.575 -        if ( msg->length != sizeof(netif_fe_interface_status_t) )
   2.576 +        if (msg->length != sizeof(netif_fe_interface_status_t))
   2.577              goto error;
   2.578 -        netif_interface_status((netif_fe_interface_status_t *)
   2.579 -                               &msg->msg[0]);
   2.580 +        netif_interface_status((netif_fe_interface_status_t *) &msg->msg[0]);
   2.581          break;
   2.582  
   2.583      case CMSG_NETIF_FE_DRIVER_STATUS:
   2.584 -        if ( msg->length != sizeof(netif_fe_driver_status_t) )
   2.585 +        if (msg->length != sizeof(netif_fe_driver_status_t))
   2.586              goto error;
   2.587 -        netif_driver_status((netif_fe_driver_status_t *)
   2.588 -                            &msg->msg[0]);
   2.589 +        netif_driver_status((netif_fe_driver_status_t *) &msg->msg[0]);
   2.590          break;
   2.591  
   2.592      error:
   2.593 @@ -1179,8 +1135,7 @@ static int probe_interfaces(void)
   2.594  
   2.595      DPRINTK(">\n");
   2.596  
   2.597 -    for ( wait_i = 0; wait_i < wait_n; wait_i++)
   2.598 -    { 
   2.599 +    for (wait_i = 0; wait_i < wait_n; wait_i++) { 
   2.600          DPRINTK("> wait_i=%d\n", wait_i);
   2.601          conn = netctrl_connected();
   2.602          if(conn) break;
   2.603 @@ -1190,8 +1145,7 @@ static int probe_interfaces(void)
   2.604      }
   2.605  
   2.606      DPRINTK("> wait finished...\n");
   2.607 -    if ( conn <= 0 )
   2.608 -    {
   2.609 +    if (conn <= 0) {
   2.610          err = netctrl_err(-ENETDOWN);
   2.611          WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
   2.612      }
   2.613 @@ -1223,8 +1177,7 @@ static int probe_interfaces(void)
   2.614      DPRINTK(">\n");
   2.615  
   2.616      netctrl.interface_n = 0;
   2.617 -    for ( wait_i = 0; wait_i < wait_n; wait_i++ )
   2.618 -    { 
   2.619 +    for (wait_i = 0; wait_i < wait_n; wait_i++) { 
   2.620          DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
   2.621          msg.handle = query;
   2.622          memcpy(cmsg.msg, &msg, sizeof(msg));
   2.623 @@ -1236,7 +1189,7 @@ static int probe_interfaces(void)
   2.624          DPRINTK("> err=%d\n", err);
   2.625          if(err) goto exit;
   2.626          DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
   2.627 -        if((int)reply->handle < 0){
   2.628 +        if((int)reply->handle < 0) {
   2.629              // No more interfaces.
   2.630              break;
   2.631          }
   2.632 @@ -1246,8 +1199,7 @@ static int probe_interfaces(void)
   2.633      }
   2.634  
   2.635    exit:
   2.636 -    if ( err )
   2.637 -    {
   2.638 +    if (err) {
   2.639          err = netctrl_err(-ENETDOWN);
   2.640          WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
   2.641      }
   2.642 @@ -1262,22 +1214,20 @@ static int probe_interfaces(void)
   2.643   * We use this notifier to send out a fake ARP reply to reset switches and
   2.644   * router ARP caches when an IP interface is brought up on a VIF.
   2.645   */
   2.646 -static int inetdev_notify(struct notifier_block *this, 
   2.647 -                          unsigned long event, 
   2.648 -                          void *ptr)
   2.649 +static int 
   2.650 +inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
   2.651  {
   2.652      struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
   2.653      struct net_device *dev = ifa->ifa_dev->dev;
   2.654      struct list_head  *ent;
   2.655      struct net_private *np;
   2.656  
   2.657 -    if ( event != NETDEV_UP )
   2.658 +    if (event != NETDEV_UP)
   2.659          goto out;
   2.660  
   2.661 -    list_for_each ( ent, &dev_list )
   2.662 -    {
   2.663 +    list_for_each (ent, &dev_list) {
   2.664          np = list_entry(ent, struct net_private, list);
   2.665 -        if ( np->dev == dev )
   2.666 +        if (np->dev == dev)
   2.667              (void)send_fake_arp(dev);
   2.668      }
   2.669          
   2.670 @@ -1295,7 +1245,7 @@ static int __init netif_init(void)
   2.671  {
   2.672      int err = 0;
   2.673  
   2.674 -    if ( xen_start_info.flags & SIF_INITDOMAIN )
   2.675 +    if (xen_start_info.flags & SIF_INITDOMAIN)
   2.676          return 0;
   2.677  
   2.678      IPRINTK("Initialising virtual ethernet driver.\n");
   2.679 @@ -1306,7 +1256,7 @@ static int __init netif_init(void)
   2.680                                      CALLBACK_IN_BLOCKING_CONTEXT);
   2.681      send_driver_status(1);
   2.682      err = probe_interfaces();
   2.683 -    if ( err )
   2.684 +    if (err)
   2.685          ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
   2.686  
   2.687      DPRINTK("< err=%d\n", err);
   2.688 @@ -1338,8 +1288,7 @@ void netif_suspend(void)
   2.689      struct list_head *ent;
   2.690      struct net_private *np;
   2.691      
   2.692 -    list_for_each ( ent, &dev_list )
   2.693 -    {
   2.694 +    list_for_each (ent, &dev_list) {
   2.695          np = list_entry(ent, struct net_private, list);
   2.696          vif_suspend(np);
   2.697      }
   2.698 @@ -1350,8 +1299,7 @@ void netif_resume(void)
   2.699      struct list_head *ent;
   2.700      struct net_private *np;
   2.701  
   2.702 -    list_for_each ( ent, &dev_list )
   2.703 -    {
   2.704 +    list_for_each (ent, &dev_list) {
   2.705          np = list_entry(ent, struct net_private, list);
   2.706          vif_resume(np);
   2.707      }
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/tools/vnet/INSTALL	Thu Feb 10 02:27:48 2005 +0000
     3.3 @@ -0,0 +1,31 @@
     3.4 +To compile and install, run "make install". If it fails, or if you need to
     3.5 +reinstall, run "make clean" first or the build will fail; at least that is
     3.6 +what I have found under 2.6.10.
     3.7 +
     3.8 +Other important items:
     3.9 +1)	You will need to have your xen0 kernel compiled with HMAC_SUPPORT 
    3.10 +	2.6.x = (MAIN MENU: Cryptographic Options -> HMAC Support)
    3.11 +	BEFORE running "make install".
    3.12 +
    3.13 +2)	You will want at least some of the other algorithms listed under
    3.14 +	"Cryptographic Options" for the kernel compiled as modules.
    3.15 +
    3.16 +3)	You will want the networking IPsec/VLAN options compiled in as modules
    3.17 +	2.6.x = (MAIN MENU: Device Drivers -> Networking Support -> 
    3.18 +				Networking Options ->
    3.19 +					IP: AH transformation
    3.20 +					IP: ESP transformation
    3.21 +					IP: IPComp transformation 
    3.22 +					IP: tunnel transformation
    3.23 +
    3.24 +					IPsec user configuration interface
    3.25 +	
    3.26 +					802.1Q VLAN Support
    3.27 +
    3.28 +4)	The module (vnet_module) will not properly load from the command line
    3.29 +	with a "modprobe vnet_module".  Use network-vnet to properly configure
    3.30 +	your system and load the module for you.
    3.31 +
    3.32 +Please refer to the additional documentation found in tools/vnet/doc for
    3.33 +proper syntax and config file parameters.
    3.34 +
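A minimal sketch of the sequence the INSTALL text above describes, assuming it is run from tools/vnet in the source tree and that the xen0 kernel already has the HMAC and IPsec options enabled; the exact network-vnet invocation is left to the documentation in tools/vnet/doc:

    # clean first when rebuilding or reinstalling (see the 2.6.10 note above)
    make clean
    # builds vnetd and the vnet module, then installs them
    make install
    # do not "modprobe vnet_module" directly; let the network-vnet script
    # configure the system and load the module for you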
     4.1 --- a/tools/vnet/Makefile	Wed Feb 09 18:57:38 2005 +0000
     4.2 +++ b/tools/vnet/Makefile	Thu Feb 10 02:27:48 2005 +0000
     4.3 @@ -12,6 +12,7 @@ export prefix?=$(shell cd ../../dist/ins
     4.4  all: compile
     4.5  
     4.6  compile: vnetd vnet-module
     4.7 +#compile: vnet-module
     4.8  
     4.9  gc.tar.gz:
    4.10  	wget http://www.hpl.hp.com/personal/Hans_Boehm/gc/gc_source/$@
    4.11 @@ -20,10 +21,8 @@ gc: gc.tar.gz
    4.12  	tar xfz gc.tar.gz
    4.13  	ln -sf gc?.? gc
    4.14  
    4.15 -gc/Makefile:
    4.16 -	(cd gc && ./configure --prefix=`pwd`/install)
    4.17 -
    4.18 -gc-install: gc gc/Makefile
    4.19 +gc-install: gc 
    4.20 +	(cd gc && make test && ./configure --prefix=`pwd`/install)
    4.21  	make -C gc
    4.22  	make -C gc install
    4.23  
    4.24 @@ -47,5 +46,6 @@ install: compile
    4.25  clean:
    4.26  	-$(MAKE) -C vnetd clean
    4.27  	-$(MAKE) -C vnet-module clean
    4.28 -
    4.29 +	-rm -rf gc?.? gc
    4.30 +	
    4.31  pristine: clean gc-pristine
     5.1 --- a/tools/vnet/vnet-module/Makefile-2.6	Wed Feb 09 18:57:38 2005 +0000
     5.2 +++ b/tools/vnet/vnet-module/Makefile-2.6	Thu Feb 10 02:27:48 2005 +0000
     5.3 @@ -38,8 +38,8 @@ module modules:
     5.4  
     5.5  .PHONY: install install-module modules_install
     5.6  install install-module modules_install: module
     5.7 -	install -m 0755 -d $(DESTDIR)$(KERNEL_MODULE_DIR)
     5.8 -	install -m 0554 $(KERNEL_MODULE) $(DESTDIR)$(KERNEL_MODULE_DIR)
     5.9 +	install -m 0755 -d $(DESTDIR)$(KERNEL_MODULE_DIR)/xen
    5.10 +	install -m 0554 $(KERNEL_MODULE) $(DESTDIR)$(KERNEL_MODULE_DIR)/xen
    5.11  
    5.12  .PHONY: clean
    5.13  clean:
     6.1 --- a/tools/vnet/vnet-module/if_varp.h	Wed Feb 09 18:57:38 2005 +0000
     6.2 +++ b/tools/vnet/vnet-module/if_varp.h	Thu Feb 10 02:27:48 2005 +0000
     6.3 @@ -36,7 +36,7 @@ typedef struct VnetMsgHdr {
     6.4  } __attribute__((packed)) VnetMsgHdr;
     6.5  
     6.6  typedef struct VarpHdr {
     6.7 -    VnetMsgHdr;
     6.8 +    VnetMsgHdr vnetmsghdr;
     6.9      uint32_t vnet;
    6.10      Vmac vmac;
    6.11      uint32_t addr;
    6.12 @@ -50,4 +50,4 @@ typedef struct VarpHdr {
    6.13  
    6.14  
    6.15  
    6.16 -#endif /* ! _VNET_IF_VARP_H */
    6.17 +#endif  /* ! _VNET_IF_VARP_H */
     7.1 --- a/tools/vnet/vnet-module/varp.c	Wed Feb 09 18:57:38 2005 +0000
     7.2 +++ b/tools/vnet/vnet-module/varp.c	Thu Feb 10 02:27:48 2005 +0000
     7.3 @@ -368,8 +368,8 @@ int varp_send(u16 opcode, struct net_dev
     7.4      // Varp header.
     7.5      varph = (void*)skb_put(skbout, varp_n);
     7.6      *varph = (VarpHdr){};
     7.7 -    varph->id                = htons(VARP_ID);
     7.8 -    varph->opcode            = htons(opcode);
     7.9 +    varph->vnetmsghdr.id     = htons(VARP_ID);
    7.10 +    varph->vnetmsghdr.opcode = htons(opcode);
    7.11      varph->vnet              = htonl(vnet);
    7.12      varph->vmac              = *vmac;
    7.13      varph->addr              = saddr;
    7.14 @@ -1076,9 +1076,9 @@ int varp_handle_message(struct sk_buff *
    7.15          goto exit;
    7.16      }
    7.17      mine = 1;
    7.18 -    if(varph->id != htons(VARP_ID)){
    7.19 +    if(varph->vnetmsghdr.id != htons(VARP_ID)){
    7.20          // It's not varp at all - ignore it.
    7.21 -        wprintf("> Unknown id: %d \n", ntohs(varph->id));
    7.22 +        wprintf("> Unknown id: %d \n", ntohs(varph->vnetmsghdr.id));
    7.23          goto exit;
    7.24      }
    7.25      if(1){
    7.26 @@ -1086,13 +1086,13 @@ int varp_handle_message(struct sk_buff *
    7.27                  NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr));
    7.28          dprintf("> sport=%u dport=%u\n", ntohs(skb->h.uh->source), ntohs(skb->h.uh->dest));
    7.29          dprintf("> opcode=%d vnet=%u vmac=" MACFMT " addr=" IPFMT "\n",
    7.30 -                ntohs(varph->opcode),
    7.31 +                ntohs(varph->vnetmsghdr.opcode),
    7.32                  ntohl(varph->vnet),
    7.33                  MAC6TUPLE(varph->vmac.mac),
    7.34                  NIPQUAD(varph->addr));
    7.35          varp_dprint();
    7.36      }
    7.37 -    switch(ntohs(varph->opcode)){
    7.38 +    switch(ntohs(varph->vnetmsghdr.opcode)){
    7.39      case VARP_OP_REQUEST:
    7.40          err = varp_handle_request(skb, varph);
    7.41          break;
    7.42 @@ -1100,7 +1100,7 @@ int varp_handle_message(struct sk_buff *
    7.43          err = varp_handle_announce(skb, varph);
    7.44          break;
    7.45      default:
    7.46 -        wprintf("> Unknown opcode: %d \n", ntohs(varph->opcode));
    7.47 +        wprintf("> Unknown opcode: %d \n", ntohs(varph->vnetmsghdr.opcode));
    7.48         break;
    7.49      }
    7.50    exit:
     8.1 --- a/tools/vnet/vnetd/vcache.c	Wed Feb 09 18:57:38 2005 +0000
     8.2 +++ b/tools/vnet/vnetd/vcache.c	Thu Feb 10 02:27:48 2005 +0000
     8.3 @@ -102,11 +102,11 @@ int varp_send(Conn *conn, uint16_t opcod
     8.4      int varp_n = sizeof(VarpHdr);
     8.5      VarpHdr varph = {};
     8.6  
     8.7 -    varph.id     = htons(VARP_ID);
     8.8 -    varph.opcode = htons(opcode);
     8.9 -    varph.vnet   = vnet;
    8.10 -    varph.vmac   = *vmac;
    8.11 -    varph.addr   = addr;
    8.12 +    varph.vnetmsghdr.id     = htons(VARP_ID);
    8.13 +    varph.vnetmsghdr.opcode = htons(opcode);
    8.14 +    varph.vnet              = vnet;
    8.15 +    varph.vmac              = *vmac;
    8.16 +    varph.addr              = addr;
    8.17  
    8.18      if(0){
    8.19          struct sockaddr_in self;
    8.20 @@ -503,7 +503,7 @@ void VarpCache_sweep(VarpCache *z, int a
    8.21   * @param local whether it's local or not
    8.22   */
    8.23  void vcache_forward_varp(VarpHdr *varph, int local){
    8.24 -    uint16_t opcode = ntohs(varph->opcode);
    8.25 +    uint16_t opcode = ntohs(varph->vnetmsghdr.opcode);
    8.26      if(local){
    8.27          ConnList *l;
    8.28          for(l = vnetd->connections; l; l = l->next){
    8.29 @@ -611,7 +611,7 @@ int vcache_handle_message(IPMessage *msg
    8.30          dprintf("> opcode=%d vnet=%u vmac=" MACFMT "\n",
    8.31                  ntohs(varph->opcode), ntohl(varph->vnet), MAC6TUPLE(varph->vmac.mac));
    8.32      }
    8.33 -    switch(ntohs(varph->opcode)){
    8.34 +    switch(ntohs(varph->vnetmsghdr.opcode)){
    8.35      case VARP_OP_REQUEST:
    8.36          err = vcache_handle_request(msg, varph, local);
    8.37          break;