ia64/xen-unstable

changeset 3758:9376fcbfd5a0

bitkeeper revision 1.1159.223.84 (420a6831W2g6HHtXksV16NZox0sA5Q)

Subject: [Xen-devel] [PATCH 1/2] netfront coding style clean-up (trivial)

Signed-off-by: Jon Mason <jdmason@us.ibm.com>
Signed-off-by: ian@xensource.com
author iap10@freefall.cl.cam.ac.uk
date Wed Feb 09 19:44:49 2005 +0000 (2005-02-09)
parents 4b38851f1bd1
children b5019559f1ca ce135fa88699
files linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c
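
The patch mechanically converts netfront from the old Xen bracing style (space-padded
conditions, braces on their own line) to standard Linux kernel style, hoists the
net/arp.h and net/route.h includes into the main include block, and drops an unused
"#define DEBUG 0". A minimal before/after sketch of the convention being applied
(illustrative C in the spirit of the patch, not taken verbatim from it):

    /* Old Xen style: padded parentheses, brace on its own line. */
    if ( np->tx_full )
    {
        netif_stop_queue(dev);
    }

    /* Linux kernel style: no inner padding, brace on the same line, and
     * braces dropped for a single-statement body. */
    if (np->tx_full)
        netif_stop_queue(dev);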
line diff
     1.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c	Wed Feb 09 02:58:09 2005 +0000
     1.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c	Wed Feb 09 19:44:49 2005 +0000
     1.3 @@ -41,6 +41,8 @@
     1.4  #include <linux/bitops.h>
     1.5  #include <net/sock.h>
     1.6  #include <net/pkt_sched.h>
     1.7 +#include <net/arp.h>
     1.8 +#include <net/route.h>
     1.9  #include <asm/io.h>
    1.10  #include <asm-xen/evtchn.h>
    1.11  #include <asm-xen/ctrl_if.h>
    1.12 @@ -48,11 +50,6 @@
    1.13  #include <asm-xen/balloon.h>
    1.14  #include <asm/page.h>
    1.15  
    1.16 -#include <net/arp.h>
    1.17 -#include <net/route.h>
    1.18 -
    1.19 -#define DEBUG 0
    1.20 -
    1.21  #ifndef __GFP_NOWARN
    1.22  #define __GFP_NOWARN 0
    1.23  #endif
    1.24 @@ -63,7 +60,7 @@
    1.25          atomic_set(&(skb_shinfo(_skb)->dataref), 1);  \
    1.26          skb_shinfo(_skb)->nr_frags = 0;               \
    1.27          skb_shinfo(_skb)->frag_list = NULL;           \
    1.28 -    } while ( 0 )
    1.29 +    } while (0)
    1.30  
    1.31  /* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
    1.32  #define RX_HEADROOM 200
    1.33 @@ -171,10 +168,9 @@ static struct net_device *find_dev_by_ha
    1.34  {
    1.35      struct list_head *ent;
    1.36      struct net_private *np;
    1.37 -    list_for_each ( ent, &dev_list )
    1.38 -    {
    1.39 +    list_for_each (ent, &dev_list) {
    1.40          np = list_entry(ent, struct net_private, list);
    1.41 -        if ( np->handle == handle )
    1.42 +        if (np->handle == handle)
    1.43              return np->dev;
    1.44      }
    1.45      return NULL;
    1.46 @@ -203,7 +199,7 @@ static void netctrl_init(void)
    1.47   */
    1.48  static int netctrl_err(int err)
    1.49  {
    1.50 -    if ( (err < 0) && !netctrl.err )
    1.51 +    if ((err < 0) && !netctrl.err)
    1.52          netctrl.err = err;
    1.53      return netctrl.err;
    1.54  }
    1.55 @@ -216,9 +212,9 @@ static int netctrl_connected(void)
    1.56  {
    1.57      int ok;
    1.58  
    1.59 -    if ( netctrl.err )
    1.60 +    if (netctrl.err)
    1.61          ok = netctrl.err;
    1.62 -    else if ( netctrl.up == NETIF_DRIVER_STATUS_UP )
    1.63 +    else if (netctrl.up == NETIF_DRIVER_STATUS_UP)
    1.64          ok = (netctrl.connected_n == netctrl.interface_n);
    1.65      else
    1.66          ok = 0;
    1.67 @@ -266,14 +262,14 @@ static int send_fake_arp(struct net_devi
    1.68      src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
    1.69  
    1.70      /* No IP? Then nothing to do. */
    1.71 -    if ( src_ip == 0 )
    1.72 +    if (src_ip == 0)
    1.73          return 0;
    1.74  
    1.75      skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
    1.76                       dst_ip, dev, src_ip,
    1.77                       /*dst_hw*/ NULL, /*src_hw*/ NULL, 
    1.78                       /*target_hw*/ dev->dev_addr);
    1.79 -    if ( skb == NULL )
    1.80 +    if (skb == NULL)
    1.81          return -ENOMEM;
    1.82  
    1.83      return dev_queue_xmit(skb);
    1.84 @@ -302,15 +298,14 @@ static void network_tx_buf_gc(struct net
    1.85      struct net_private *np = netdev_priv(dev);
    1.86      struct sk_buff *skb;
    1.87  
    1.88 -    if ( np->backend_state != BEST_CONNECTED )
    1.89 +    if (np->backend_state != BEST_CONNECTED)
    1.90          return;
    1.91  
    1.92      do {
    1.93          prod = np->tx->resp_prod;
    1.94          rmb(); /* Ensure we see responses up to 'rp'. */
    1.95  
    1.96 -        for ( i = np->tx_resp_cons; i != prod; i++ )
    1.97 -        {
    1.98 +        for (i = np->tx_resp_cons; i != prod; i++) {
    1.99              id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
   1.100              skb = np->tx_skbs[id];
   1.101              ADD_ID_TO_FREELIST(np->tx_skbs, id);
   1.102 @@ -330,14 +325,11 @@ static void network_tx_buf_gc(struct net
   1.103          np->tx->event = 
   1.104              prod + ((np->tx->req_prod - prod) >> 1) + 1;
   1.105          mb();
   1.106 -    }
   1.107 -    while ( prod != np->tx->resp_prod );
   1.108 +    } while (prod != np->tx->resp_prod);
   1.109  
   1.110 -    if ( np->tx_full && 
   1.111 -         ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE) )
   1.112 -    {
   1.113 +    if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
   1.114          np->tx_full = 0;
   1.115 -        if ( np->user_state == UST_OPEN )
   1.116 +        if (np->user_state == UST_OPEN)
   1.117              netif_wake_queue(dev);
   1.118      }
   1.119  }
   1.120 @@ -351,7 +343,7 @@ static void network_alloc_rx_buffers(str
   1.121      int i, batch_target;
   1.122      NETIF_RING_IDX req_prod = np->rx->req_prod;
   1.123  
   1.124 -    if ( unlikely(np->backend_state != BEST_CONNECTED) )
   1.125 +    if (unlikely(np->backend_state != BEST_CONNECTED))
   1.126          return;
   1.127  
   1.128      /*
   1.129 @@ -361,20 +353,18 @@ static void network_alloc_rx_buffers(str
   1.130       * ourself and for other kernel subsystems.
   1.131       */
   1.132      batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
   1.133 -    for ( i = skb_queue_len(&np->rx_batch); i < batch_target; i++ )
   1.134 -    {
   1.135 -        if ( unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL) )
   1.136 +    for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
   1.137 +        if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
   1.138              break;
   1.139          __skb_queue_tail(&np->rx_batch, skb);
   1.140      }
   1.141  
   1.142      /* Is the batch large enough to be worthwhile? */
   1.143 -    if ( i < (np->rx_target/2)  )
   1.144 +    if (i < (np->rx_target/2))
   1.145          return;
   1.146  
   1.147 -    for ( i = 0; ; i++ )
   1.148 -    {
   1.149 -        if ( (skb = __skb_dequeue(&np->rx_batch)) == NULL )
   1.150 +    for (i = 0; ; i++) {
   1.151 +        if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
   1.152              break;
   1.153  
   1.154          skb->dev = dev;
   1.155 @@ -421,15 +411,15 @@ static void network_alloc_rx_buffers(str
   1.156      (void)HYPERVISOR_multicall(rx_mcl, i+1);
   1.157  
   1.158      /* Check return status of HYPERVISOR_dom_mem_op(). */
   1.159 -    if ( unlikely(rx_mcl[i].args[5] != i) )
   1.160 +    if (unlikely(rx_mcl[i].args[5] != i))
   1.161          panic("Unable to reduce memory reservation\n");
   1.162  
   1.163      /* Above is a suitable barrier to ensure backend will see requests. */
   1.164      np->rx->req_prod = req_prod + i;
   1.165  
   1.166      /* Adjust our floating fill target if we risked running out of buffers. */
   1.167 -    if ( ((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
   1.168 -         ((np->rx_target *= 2) > RX_MAX_TARGET) )
   1.169 +    if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
   1.170 +         ((np->rx_target *= 2) > RX_MAX_TARGET))
   1.171          np->rx_target = RX_MAX_TARGET;
   1.172  }
   1.173  
   1.174 @@ -441,18 +431,16 @@ static int network_start_xmit(struct sk_
   1.175      netif_tx_request_t *tx;
   1.176      NETIF_RING_IDX i;
   1.177  
   1.178 -    if ( unlikely(np->tx_full) )
   1.179 -    {
   1.180 +    if (unlikely(np->tx_full)) {
   1.181          printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
   1.182          netif_stop_queue(dev);
   1.183          goto drop;
   1.184      }
   1.185  
   1.186 -    if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   1.187 -                  PAGE_SIZE) )
   1.188 -    {
   1.189 +    if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   1.190 +                  PAGE_SIZE)) {
   1.191          struct sk_buff *nskb;
   1.192 -        if ( unlikely((nskb = alloc_xen_skb(skb->len)) == NULL) )
   1.193 +        if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
   1.194              goto drop;
   1.195          skb_put(nskb, skb->len);
   1.196          memcpy(nskb->data, skb->data, skb->len);
   1.197 @@ -463,8 +451,7 @@ static int network_start_xmit(struct sk_
   1.198      
   1.199      spin_lock_irq(&np->tx_lock);
   1.200  
   1.201 -    if ( np->backend_state != BEST_CONNECTED )
   1.202 -    {
   1.203 +    if (np->backend_state != BEST_CONNECTED) {
   1.204          spin_unlock_irq(&np->tx_lock);
   1.205          goto drop;
   1.206      }
   1.207 @@ -485,8 +472,7 @@ static int network_start_xmit(struct sk_
   1.208  
   1.209      network_tx_buf_gc(dev);
   1.210  
   1.211 -    if ( (i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1) )
   1.212 -    {
   1.213 +    if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
   1.214          np->tx_full = 1;
   1.215          netif_stop_queue(dev);
   1.216      }
   1.217 @@ -498,7 +484,7 @@ static int network_start_xmit(struct sk_
   1.218  
   1.219      /* Only notify Xen if we really have to. */
   1.220      mb();
   1.221 -    if ( np->tx->TX_TEST_IDX == i )
   1.222 +    if (np->tx->TX_TEST_IDX == i)
   1.223          notify_via_evtchn(np->evtchn);
   1.224  
   1.225      return 0;
   1.226 @@ -509,7 +495,6 @@ static int network_start_xmit(struct sk_
   1.227      return 0;
   1.228  }
   1.229  
   1.230 -
   1.231  static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
   1.232  {
   1.233      struct net_device *dev = dev_id;
   1.234 @@ -520,8 +505,7 @@ static irqreturn_t netif_int(int irq, vo
   1.235      network_tx_buf_gc(dev);
   1.236      spin_unlock_irqrestore(&np->tx_lock, flags);
   1.237  
   1.238 -    if ( (np->rx_resp_cons != np->rx->resp_prod) &&
   1.239 -         (np->user_state == UST_OPEN) )
   1.240 +    if ((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
   1.241          netif_rx_schedule(dev);
   1.242  
   1.243      return IRQ_HANDLED;
   1.244 @@ -542,33 +526,30 @@ static int netif_poll(struct net_device 
   1.245  
   1.246      spin_lock(&np->rx_lock);
   1.247  
   1.248 -    if ( np->backend_state != BEST_CONNECTED )
   1.249 -    {
   1.250 +    if (np->backend_state != BEST_CONNECTED) {
   1.251          spin_unlock(&np->rx_lock);
   1.252          return 0;
   1.253      }
   1.254  
   1.255      skb_queue_head_init(&rxq);
   1.256  
   1.257 -    if ( (budget = *pbudget) > dev->quota )
   1.258 +    if ((budget = *pbudget) > dev->quota)
   1.259          budget = dev->quota;
   1.260  
   1.261      rp = np->rx->resp_prod;
   1.262      rmb(); /* Ensure we see queued responses up to 'rp'. */
   1.263  
   1.264 -    for ( i = np->rx_resp_cons, work_done = 0; 
   1.265 -          (i != rp) && (work_done < budget); 
   1.266 -          i++, work_done++ )
   1.267 -    {
    1.268 +    for (i = np->rx_resp_cons, work_done = 0;
    1.269 +         (i != rp) && (work_done < budget);
    1.270 +         i++, work_done++) {
   1.271          rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
   1.272  
   1.273          /*
   1.274           * An error here is very odd. Usually indicates a backend bug,
   1.275           * low-memory condition, or that we didn't have reservation headroom.
   1.276           */
   1.277 -        if ( unlikely(rx->status <= 0) )
   1.278 -        {
   1.279 -            if ( net_ratelimit() )
   1.280 +        if (unlikely(rx->status <= 0)) {
   1.281 +            if (net_ratelimit())
   1.282                  printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
   1.283              np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
   1.284              wmb();
   1.285 @@ -608,8 +589,7 @@ static int netif_poll(struct net_device 
   1.286      balloon_update_driver_allowance(-work_done);
   1.287  
   1.288      /* Do all the remapping work, and M->P updates, in one big hypercall. */
   1.289 -    if ( likely((mcl - rx_mcl) != 0) )
   1.290 -    {
   1.291 +    if (likely((mcl - rx_mcl) != 0)) {
   1.292          mcl->op = __HYPERVISOR_mmu_update;
   1.293          mcl->args[0] = (unsigned long)rx_mmu;
   1.294          mcl->args[1] = mmu - rx_mmu;
   1.295 @@ -618,33 +598,29 @@ static int netif_poll(struct net_device 
   1.296          (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
   1.297      }
   1.298  
   1.299 -    while ( (skb = __skb_dequeue(&rxq)) != NULL )
   1.300 -    {
   1.301 +    while ((skb = __skb_dequeue(&rxq)) != NULL) {
   1.302          /*
   1.303           * Enough room in skbuff for the data we were passed? Also, Linux 
   1.304           * expects at least 16 bytes headroom in each receive buffer.
   1.305           */
   1.306 -        if ( unlikely(skb->tail > skb->end) ||
   1.307 -             unlikely((skb->data - skb->head) < 16) )
   1.308 -        {
    1.309 +        if (unlikely(skb->tail > skb->end) ||
    1.310 +            unlikely((skb->data - skb->head) < 16)) {
   1.311              nskb = NULL;
   1.312  
   1.313              /* Only copy the packet if it fits in the current MTU. */
   1.314 -            if ( skb->len <= (dev->mtu + ETH_HLEN) )
   1.315 -            {
   1.316 -                if ( (skb->tail > skb->end) && net_ratelimit() )
   1.317 +            if (skb->len <= (dev->mtu + ETH_HLEN)) {
   1.318 +                if ((skb->tail > skb->end) && net_ratelimit())
   1.319                      printk(KERN_INFO "Received packet needs %d bytes more "
   1.320                             "headroom.\n", skb->tail - skb->end);
   1.321  
   1.322 -                if ( (nskb = alloc_xen_skb(skb->len + 2)) != NULL )
   1.323 -                {
   1.324 +                if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
   1.325                      skb_reserve(nskb, 2);
   1.326                      skb_put(nskb, skb->len);
   1.327                      memcpy(nskb->data, skb->data, skb->len);
   1.328                      nskb->dev = skb->dev;
   1.329                  }
   1.330              }
   1.331 -            else if ( net_ratelimit() )
   1.332 +            else if (net_ratelimit())
   1.333                  printk(KERN_INFO "Received packet too big for MTU "
   1.334                         "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
   1.335  
   1.336 @@ -655,7 +631,7 @@ static int netif_poll(struct net_device 
   1.337              dev_kfree_skb(skb);
   1.338  
   1.339              /* Switch old for new, if we copied the buffer. */
   1.340 -            if ( (skb = nskb) == NULL )
   1.341 +            if ((skb = nskb) == NULL)
   1.342                  continue;
   1.343          }
   1.344          
   1.345 @@ -674,8 +650,8 @@ static int netif_poll(struct net_device 
   1.346  
   1.347      /* If we get a callback with very few responses, reduce fill target. */
   1.348      /* NB. Note exponential increase, linear decrease. */
   1.349 -    if ( ((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
   1.350 -         (--np->rx_target < RX_MIN_TARGET) )
   1.351 +    if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
   1.352 +         (--np->rx_target < RX_MIN_TARGET))
   1.353          np->rx_target = RX_MIN_TARGET;
   1.354  
   1.355      network_alloc_rx_buffers(dev);
   1.356 @@ -683,16 +659,14 @@ static int netif_poll(struct net_device 
   1.357      *pbudget   -= work_done;
   1.358      dev->quota -= work_done;
   1.359  
   1.360 -    if ( work_done < budget )
   1.361 -    {
   1.362 +    if (work_done < budget) {
   1.363          local_irq_save(flags);
   1.364  
   1.365          np->rx->event = i + 1;
   1.366      
   1.367          /* Deal with hypervisor racing our resetting of rx_event. */
   1.368          mb();
   1.369 -        if ( np->rx->resp_prod == i )
   1.370 -        {
   1.371 +        if (np->rx->resp_prod == i) {
   1.372              __netif_rx_complete(dev);
   1.373              more_to_do = 0;
   1.374          }
   1.375 @@ -755,10 +729,8 @@ static void network_connect(struct net_d
   1.376       * to avoid this but maybe it doesn't matter so much given the
   1.377       * interface has been down.
   1.378       */
   1.379 -    for ( requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++ )
   1.380 -    {
   1.381 -            if ( (unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET )
   1.382 -            {
   1.383 +    for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
   1.384 +            if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
   1.385                  struct sk_buff *skb = np->tx_skbs[i];
   1.386                  
   1.387                  tx = &np->tx->ring[requeue_idx++].req;
   1.388 @@ -775,8 +747,8 @@ static void network_connect(struct net_d
   1.389      np->tx->req_prod = requeue_idx;
   1.390  
   1.391      /* Rebuild the RX buffer freelist and the RX ring itself. */
   1.392 -    for ( requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++ )
   1.393 -        if ( (unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET )
   1.394 +    for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++)
   1.395 +        if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET)
   1.396              np->rx->ring[requeue_idx++].req.id = i;
   1.397      wmb();                
   1.398      np->rx->req_prod = requeue_idx;
   1.399 @@ -791,7 +763,7 @@ static void network_connect(struct net_d
   1.400      notify_via_evtchn(status->evtchn);  
   1.401      network_tx_buf_gc(dev);
   1.402  
   1.403 -    if ( np->user_state == UST_OPEN )
   1.404 +    if (np->user_state == UST_OPEN)
   1.405          netif_start_queue(dev);
   1.406  
   1.407      spin_unlock(&np->rx_lock);
   1.408 @@ -917,9 +889,7 @@ static void vif_disconnect(struct net_pr
   1.409   * is initiated by a special "RESET" message - disconnect could
   1.410   * just mean we're not allowed to use this interface any more.
   1.411   */
   1.412 -static void 
   1.413 -vif_reset(
   1.414 -    struct net_private *np)
   1.415 +static void vif_reset(struct net_private *np)
   1.416  {
   1.417      IPRINTK("Attempting to reconnect network interface: handle=%u\n",
   1.418              np->handle);    
   1.419 @@ -932,9 +902,8 @@ vif_reset(
   1.420   * Sets the mac and event channel from the message.
   1.421   * Binds the irq to the event channel.
   1.422   */
   1.423 -static void
   1.424 -vif_connect(
   1.425 -    struct net_private *np, netif_fe_interface_status_t *status)
    1.426 +static void
   1.427 +vif_connect(struct net_private *np, netif_fe_interface_status_t *status)
   1.428  {
   1.429      struct net_device *dev = np->dev;
   1.430      memcpy(dev->dev_addr, status->mac, ETH_ALEN);
   1.431 @@ -959,8 +928,7 @@ static int create_netdev(int handle, str
   1.432      struct net_device *dev = NULL;
   1.433      struct net_private *np = NULL;
   1.434  
   1.435 -    if ( (dev = alloc_etherdev(sizeof(struct net_private))) == NULL )
   1.436 -    {
   1.437 +    if ((dev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
   1.438          printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
   1.439          err = -ENOMEM;
   1.440          goto exit;
   1.441 @@ -978,9 +946,9 @@ static int create_netdev(int handle, str
   1.442      np->rx_target = RX_MIN_TARGET;
   1.443  
   1.444      /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
   1.445 -    for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )
   1.446 +    for (i = 0; i <= NETIF_TX_RING_SIZE; i++)
   1.447          np->tx_skbs[i] = (void *)(i+1);
   1.448 -    for ( i = 0; i <= NETIF_RX_RING_SIZE; i++ )
   1.449 +    for (i = 0; i <= NETIF_RX_RING_SIZE; i++)
   1.450          np->rx_skbs[i] = (void *)(i+1);
   1.451  
   1.452      dev->open            = network_open;
   1.453 @@ -990,8 +958,7 @@ static int create_netdev(int handle, str
   1.454      dev->poll            = netif_poll;
   1.455      dev->weight          = 64;
   1.456      
   1.457 -    if ( (err = register_netdev(dev)) != 0 )
   1.458 -    {
   1.459 +    if ((err = register_netdev(dev)) != 0) {
   1.460          printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
   1.461          goto exit;
   1.462      }
   1.463 @@ -999,9 +966,9 @@ static int create_netdev(int handle, str
   1.464      list_add(&np->list, &dev_list);
   1.465  
   1.466    exit:
   1.467 -    if ( (err != 0) && (dev != NULL ) )
    1.468 +    if ((err != 0) && (dev != NULL))
   1.469          kfree(dev);
   1.470 -    else if ( val != NULL )
   1.471 +    else if (val != NULL)
   1.472          *val = dev;
   1.473      return err;
   1.474  }
   1.475 @@ -1015,36 +982,34 @@ static int create_netdev(int handle, str
   1.476   * @return 0 on success, error code otherwise
   1.477   */
   1.478  static int 
   1.479 -target_vif(
   1.480 -    netif_fe_interface_status_t *status, struct net_private **np)
   1.481 +target_vif(netif_fe_interface_status_t *status, struct net_private **np)
   1.482  {
   1.483      int err = 0;
   1.484      struct net_device *dev;
   1.485  
   1.486      DPRINTK("> handle=%d\n", status->handle);
   1.487 -    if ( status->handle < 0 )
   1.488 -    {
   1.489 +    if (status->handle < 0) {
   1.490          err = -EINVAL;
   1.491          goto exit;
   1.492      }
   1.493  
   1.494 -    if ( (dev = find_dev_by_handle(status->handle)) != NULL )
   1.495 +    if ((dev = find_dev_by_handle(status->handle)) != NULL)
   1.496          goto exit;
   1.497  
   1.498 -    if ( status->status == NETIF_INTERFACE_STATUS_CLOSED )
   1.499 +    if (status->status == NETIF_INTERFACE_STATUS_CLOSED)
   1.500          goto exit;
   1.501 -    if ( status->status == NETIF_INTERFACE_STATUS_CHANGED )
   1.502 +    if (status->status == NETIF_INTERFACE_STATUS_CHANGED)
   1.503          goto exit;
   1.504  
   1.505      /* It's a new interface in a good state - create it. */
   1.506      DPRINTK("> create device...\n");
   1.507 -    if ( (err = create_netdev(status->handle, &dev)) != 0 )
   1.508 +    if ((err = create_netdev(status->handle, &dev)) != 0)
   1.509          goto exit;
   1.510  
   1.511      netctrl.interface_n++;
   1.512  
   1.513    exit:
   1.514 -    if ( np != NULL )
   1.515 +    if (np != NULL)
   1.516          *np = ((dev && !err) ? netdev_priv(dev) : NULL);
   1.517      DPRINTK("< err=%d\n", err);
   1.518      return err;
   1.519 @@ -1059,23 +1024,19 @@ static void netif_interface_status(netif
   1.520      DPRINTK("> status=%s handle=%d\n",
   1.521              status_name[status->status], status->handle);
   1.522  
   1.523 -    if ( (err = target_vif(status, &np)) != 0 )
   1.524 -    {
   1.525 +    if ((err = target_vif(status, &np)) != 0) {
   1.526          WPRINTK("Invalid netif: handle=%u\n", status->handle);
   1.527          return;
   1.528      }
   1.529  
   1.530 -    if ( np == NULL )
   1.531 -    {
   1.532 +    if (np == NULL) {
   1.533          DPRINTK("> no vif\n");
   1.534          return;
   1.535      }
   1.536  
   1.537 -    switch ( status->status )
   1.538 -    {
   1.539 +    switch (status->status) {
   1.540      case NETIF_INTERFACE_STATUS_CLOSED:
   1.541 -        switch ( np->backend_state )
   1.542 -        {
   1.543 +        switch (np->backend_state) {
   1.544          case BEST_CLOSED:
   1.545          case BEST_DISCONNECTED:
   1.546          case BEST_CONNECTED:
   1.547 @@ -1085,8 +1046,7 @@ static void netif_interface_status(netif
   1.548          break;
   1.549  
   1.550      case NETIF_INTERFACE_STATUS_DISCONNECTED:
   1.551 -        switch ( np->backend_state )
   1.552 -        {
   1.553 +        switch (np->backend_state) {
   1.554          case BEST_CLOSED:
   1.555              vif_disconnect(np);
   1.556              break;
   1.557 @@ -1098,8 +1058,7 @@ static void netif_interface_status(netif
   1.558          break;
   1.559  
   1.560      case NETIF_INTERFACE_STATUS_CONNECTED:
   1.561 -        switch ( np->backend_state )
   1.562 -        {
   1.563 +        switch (np->backend_state) {
   1.564          case BEST_CLOSED:
   1.565              WPRINTK("Unexpected netif status %s in state %s\n",
   1.566                      status_name[status->status],
   1.567 @@ -1141,20 +1100,17 @@ static void netif_driver_status(netif_fe
   1.568  static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
   1.569  {
   1.570  
   1.571 -    switch ( msg->subtype )
   1.572 -    {
   1.573 +    switch (msg->subtype) {
   1.574      case CMSG_NETIF_FE_INTERFACE_STATUS:
   1.575 -        if ( msg->length != sizeof(netif_fe_interface_status_t) )
   1.576 +        if (msg->length != sizeof(netif_fe_interface_status_t))
   1.577              goto error;
   1.578 -        netif_interface_status((netif_fe_interface_status_t *)
   1.579 -                               &msg->msg[0]);
   1.580 +        netif_interface_status((netif_fe_interface_status_t *) &msg->msg[0]);
   1.581          break;
   1.582  
   1.583      case CMSG_NETIF_FE_DRIVER_STATUS:
   1.584 -        if ( msg->length != sizeof(netif_fe_driver_status_t) )
   1.585 +        if (msg->length != sizeof(netif_fe_driver_status_t))
   1.586              goto error;
   1.587 -        netif_driver_status((netif_fe_driver_status_t *)
   1.588 -                            &msg->msg[0]);
   1.589 +        netif_driver_status((netif_fe_driver_status_t *) &msg->msg[0]);
   1.590          break;
   1.591  
   1.592      error:
   1.593 @@ -1179,8 +1135,7 @@ static int probe_interfaces(void)
   1.594  
   1.595      DPRINTK(">\n");
   1.596  
   1.597 -    for ( wait_i = 0; wait_i < wait_n; wait_i++)
   1.598 -    { 
   1.599 +    for (wait_i = 0; wait_i < wait_n; wait_i++) { 
   1.600          DPRINTK("> wait_i=%d\n", wait_i);
   1.601          conn = netctrl_connected();
   1.602          if(conn) break;
   1.603 @@ -1190,8 +1145,7 @@ static int probe_interfaces(void)
   1.604      }
   1.605  
   1.606      DPRINTK("> wait finished...\n");
   1.607 -    if ( conn <= 0 )
   1.608 -    {
   1.609 +    if (conn <= 0) {
   1.610          err = netctrl_err(-ENETDOWN);
   1.611          WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
   1.612      }
   1.613 @@ -1223,8 +1177,7 @@ static int probe_interfaces(void)
   1.614      DPRINTK(">\n");
   1.615  
   1.616      netctrl.interface_n = 0;
   1.617 -    for ( wait_i = 0; wait_i < wait_n; wait_i++ )
   1.618 -    { 
   1.619 +    for (wait_i = 0; wait_i < wait_n; wait_i++) { 
   1.620          DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
   1.621          msg.handle = query;
   1.622          memcpy(cmsg.msg, &msg, sizeof(msg));
   1.623 @@ -1236,7 +1189,7 @@ static int probe_interfaces(void)
   1.624          DPRINTK("> err=%d\n", err);
   1.625          if(err) goto exit;
   1.626          DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
   1.627 -        if((int)reply->handle < 0){
   1.628 +        if((int)reply->handle < 0) {
   1.629              // No more interfaces.
   1.630              break;
   1.631          }
   1.632 @@ -1246,8 +1199,7 @@ static int probe_interfaces(void)
   1.633      }
   1.634  
   1.635    exit:
   1.636 -    if ( err )
   1.637 -    {
   1.638 +    if (err) {
   1.639          err = netctrl_err(-ENETDOWN);
   1.640          WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
   1.641      }
   1.642 @@ -1262,22 +1214,20 @@ static int probe_interfaces(void)
   1.643   * We use this notifier to send out a fake ARP reply to reset switches and
   1.644   * router ARP caches when an IP interface is brought up on a VIF.
   1.645   */
   1.646 -static int inetdev_notify(struct notifier_block *this, 
   1.647 -                          unsigned long event, 
   1.648 -                          void *ptr)
   1.649 +static int 
   1.650 +inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
   1.651  {
   1.652      struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
   1.653      struct net_device *dev = ifa->ifa_dev->dev;
   1.654      struct list_head  *ent;
   1.655      struct net_private *np;
   1.656  
   1.657 -    if ( event != NETDEV_UP )
   1.658 +    if (event != NETDEV_UP)
   1.659          goto out;
   1.660  
   1.661 -    list_for_each ( ent, &dev_list )
   1.662 -    {
   1.663 +    list_for_each (ent, &dev_list) {
   1.664          np = list_entry(ent, struct net_private, list);
   1.665 -        if ( np->dev == dev )
   1.666 +        if (np->dev == dev)
   1.667              (void)send_fake_arp(dev);
   1.668      }
   1.669          
   1.670 @@ -1295,7 +1245,7 @@ static int __init netif_init(void)
   1.671  {
   1.672      int err = 0;
   1.673  
   1.674 -    if ( xen_start_info.flags & SIF_INITDOMAIN )
   1.675 +    if (xen_start_info.flags & SIF_INITDOMAIN)
   1.676          return 0;
   1.677  
   1.678      IPRINTK("Initialising virtual ethernet driver.\n");
   1.679 @@ -1306,7 +1256,7 @@ static int __init netif_init(void)
   1.680                                      CALLBACK_IN_BLOCKING_CONTEXT);
   1.681      send_driver_status(1);
   1.682      err = probe_interfaces();
   1.683 -    if ( err )
   1.684 +    if (err)
   1.685          ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
   1.686  
   1.687      DPRINTK("< err=%d\n", err);
   1.688 @@ -1338,8 +1288,7 @@ void netif_suspend(void)
   1.689      struct list_head *ent;
   1.690      struct net_private *np;
   1.691      
   1.692 -    list_for_each ( ent, &dev_list )
   1.693 -    {
   1.694 +    list_for_each (ent, &dev_list) {
   1.695          np = list_entry(ent, struct net_private, list);
   1.696          vif_suspend(np);
   1.697      }
   1.698 @@ -1350,8 +1299,7 @@ void netif_resume(void)
   1.699      struct list_head *ent;
   1.700      struct net_private *np;
   1.701  
   1.702 -    list_for_each ( ent, &dev_list )
   1.703 -    {
   1.704 +    list_for_each (ent, &dev_list) {
   1.705          np = list_entry(ent, struct net_private, list);
   1.706          vif_resume(np);
   1.707      }