ia64/xen-unstable

changeset 3410:7a39463e17ce

bitkeeper revision 1.1159.217.4 (41e3e505Zh28C6DlwmocaVnUyA-BcA)

remove hard tabs
author kaf24@scramble.cl.cam.ac.uk
date Tue Jan 11 14:39:01 2005 +0000 (2005-01-11)
parents 2162fcd0d254
children 9b5686c89524
files linux-2.6.10-xen-sparse/drivers/xen/blkfront/blkfront.c
line diff
     1.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/blkfront/blkfront.c	Tue Jan 11 14:35:09 2005 +0000
     1.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/blkfront/blkfront.c	Tue Jan 11 14:39:01 2005 +0000
     1.3 @@ -420,14 +420,14 @@ static irqreturn_t blkif_int(int irq, vo
     1.4  
     1.5      for ( i = blk_ring.rsp_cons; i != rp; i++ )
     1.6      {
     1.7 -	unsigned long id;
     1.8 +        unsigned long id;
     1.9  
    1.10          bret = RING_GET_RESPONSE(BLKIF_RING, &blk_ring, i);
    1.11 -	id = bret->id;
    1.12 -	req = (struct request *)rec_ring[id].id;
    1.13 -	blkif_completion( &rec_ring[id] );
    1.14 +        id = bret->id;
    1.15 +        req = (struct request *)rec_ring[id].id;
    1.16 +        blkif_completion( &rec_ring[id] );
    1.17  
    1.18 -	ADD_ID_TO_FREELIST(id); /* overwrites req */
    1.19 +        ADD_ID_TO_FREELIST(id); /* overwrites req */
    1.20  
    1.21          switch ( bret->operation )
    1.22          {
    1.23 @@ -436,7 +436,7 @@ static irqreturn_t blkif_int(int irq, vo
    1.24              if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
    1.25                  DPRINTK("Bad return from blkdev data request: %x\n",
    1.26                          bret->status);
    1.27 -	    
    1.28 +     
    1.29              if ( unlikely(end_that_request_first
    1.30                            (req, 
    1.31                             (bret->status == BLKIF_RSP_OKAY),
    1.32 @@ -509,7 +509,7 @@ static void kick_pending_request_queues(
    1.33      /* We kick pending request queues if the ring is reasonably empty. */
    1.34      if ( (nr_pending != 0) && 
    1.35           (RING_PENDING_REQUESTS(BLKIF_RING, &blk_ring) < 
    1.36 -            (RING_SIZE(BLKIF_RING, &blk_ring) >> 1)) )
    1.37 +          (RING_SIZE(BLKIF_RING, &blk_ring) >> 1)) )
    1.38      {
    1.39          /* Attempt to drain the queue, but bail if the ring becomes full. */
    1.40          while ( (nr_pending != 0) && !RING_FULL(BLKIF_RING, &blk_ring) )
    1.41 @@ -632,7 +632,7 @@ int blkif_ioctl(struct inode *inode, str
    1.42          if (!argument) return -EINVAL;
    1.43  
    1.44          /* We don't have real geometry info, but let's at least return
    1.45 -	   values consistent with the size of the device */
    1.46 +           values consistent with the size of the device */
    1.47  
    1.48          heads = 0xff;
    1.49          sectors = 0x3f; 
    1.50 @@ -650,7 +650,7 @@ int blkif_ioctl(struct inode *inode, str
    1.51          if (!argument) return -EINVAL;
    1.52  
    1.53          /* We don't have real geometry info, but let's at least return
    1.54 -	   values consistent with the size of the device */
    1.55 +           values consistent with the size of the device */
    1.56  
    1.57          heads = 0xff;
    1.58          sectors = 0x3f; 
    1.59 @@ -807,13 +807,13 @@ static int blkif_queue_request(unsigned 
    1.60               (sg_next_sect == sector_number) )
    1.61          {
    1.62              req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, 
    1.63 -                    blk_ring.req_prod_pvt - 1);
    1.64 +                                   blk_ring.req_prod_pvt - 1);
    1.65              bh = (struct buffer_head *)id;
    1.66 -	    
    1.67 +     
    1.68              bh->b_reqnext = (struct buffer_head *)rec_ring[req->id].id;
    1.69 -	    
    1.70 +     
    1.71  
    1.72 -	    rec_ring[req->id].id = id;
    1.73 +            rec_ring[req->id].id = id;
    1.74  
    1.75              req->frame_and_sects[req->nr_segments] = 
    1.76                  buffer_ma | (fsect<<3) | lsect;
    1.77 @@ -966,16 +966,16 @@ static void blkif_int(int irq, void *dev
    1.78  
    1.79      for ( i = blk_ring.rsp_cons; i != rp; i++ )
    1.80      {
    1.81 -	unsigned long id;
    1.82 +        unsigned long id;
    1.83          blkif_response_t *bret;
    1.84          
    1.85          bret = RING_GET_RESPONSE(BLKIF_RING, &blk_ring, i);
    1.86 -	id = bret->id;
    1.87 -	bh = (struct buffer_head *)rec_ring[id].id; 
    1.88 +        id = bret->id;
    1.89 +        bh = (struct buffer_head *)rec_ring[id].id; 
    1.90  
    1.91 -	blkif_completion( &rec_ring[id] );
    1.92 +        blkif_completion( &rec_ring[id] );
    1.93  
    1.94 -	ADD_ID_TO_FREELIST(id);
    1.95 +        ADD_ID_TO_FREELIST(id);
    1.96  
    1.97          switch ( bret->operation )
    1.98          {
    1.99 @@ -1000,7 +1000,7 @@ static void blkif_int(int irq, void *dev
   1.100              BUG();
   1.101          }
   1.102  
   1.103 -	}
   1.104 +    }
   1.105      blk_ring.rsp_cons = i;
   1.106      
   1.107      kick_pending_request_queues();
   1.108 @@ -1149,7 +1149,7 @@ static void blkif_recover(void)
   1.109          if ( rec_ring[i].id >= PAGE_OFFSET )
   1.110          {
   1.111              req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, 
   1.112 -                    blk_ring.req_prod_pvt);
   1.113 +                                   blk_ring.req_prod_pvt);
   1.114              translate_req_to_mfn(req, &rec_ring[i]);
   1.115              blk_ring.req_prod_pvt++;
   1.116          }
   1.117 @@ -1159,7 +1159,7 @@ static void blkif_recover(void)
   1.118      for ( i = 0; i < blk_ring.req_prod_pvt; i++ ) 
   1.119      {
   1.120          req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, i);
   1.121 -        rec_ring[i].id = req->id;		
   1.122 +        rec_ring[i].id = req->id;  
   1.123          req->id = i;
   1.124          translate_req_to_pfn(&rec_ring[i], req);
   1.125      }
   1.126 @@ -1283,7 +1283,7 @@ static void blkif_status(blkif_fe_interf
   1.127          }
   1.128          break;
   1.129  
   1.130 -   case BLKIF_INTERFACE_STATUS_CHANGED:
   1.131 +    case BLKIF_INTERFACE_STATUS_CHANGED:
   1.132          switch ( blkif_state )
   1.133          {
   1.134          case BLKIF_STATE_CLOSED:
   1.135 @@ -1294,7 +1294,7 @@ static void blkif_status(blkif_fe_interf
   1.136              vbd_update();
   1.137              break;
   1.138          }
   1.139 -       break;
   1.140 +        break;
   1.141  
   1.142      default:
   1.143          WPRINTK(" Invalid blkif status: %d\n", status->status);
   1.144 @@ -1362,7 +1362,7 @@ int __init xlblk_init(void)
   1.145  
   1.146      rec_ring_free = 0;
   1.147      for ( i = 0; i < RING_SIZE(BLKIF_RING, &blk_ring); i++ )
   1.148 -	rec_ring[i].id = i+1;
   1.149 +        rec_ring[i].id = i+1;
   1.150      rec_ring[RING_SIZE(BLKIF_RING, &blk_ring)-1].id = 0x0fffffff;
   1.151  
   1.152      (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
   1.153 @@ -1391,13 +1391,13 @@ void blkif_completion(blkif_request_t *r
   1.154      switch ( req->operation )
   1.155      {
   1.156      case BLKIF_OP_READ:
   1.157 -	for ( i = 0; i < req->nr_segments; i++ )
   1.158 -	{
   1.159 -	    unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
   1.160 -	    unsigned long mfn = phys_to_machine_mapping[pfn];
   1.161 -	    xen_machphys_update(mfn, pfn);
   1.162 -	}
   1.163 -	break;
   1.164 +        for ( i = 0; i < req->nr_segments; i++ )
   1.165 +        {
   1.166 +            unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
   1.167 +            unsigned long mfn = phys_to_machine_mapping[pfn];
   1.168 +            xen_machphys_update(mfn, pfn);
   1.169 +        }
   1.170 +        break;
   1.171      }
   1.172      
   1.173  }
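
For reference, a minimal sketch of how hard tabs could be expanded to spaces in a source file, assuming an 8-column tab stop to match the indentation used above (this filter is illustrative only and is not part of the changeset):

    #include <stdio.h>

    /* Copy stdin to stdout, replacing each hard tab with spaces up to
     * the next 8-column tab stop; all other characters pass through. */
    int main(void)
    {
        int c, col = 0;

        while ( (c = getchar()) != EOF )
        {
            if ( c == '\t' )
            {
                /* Emit at least one space, then pad to the next tab stop. */
                do { putchar(' '); col++; } while ( col % 8 != 0 );
            }
            else
            {
                putchar(c);
                col = (c == '\n') ? 0 : col + 1;
            }
        }

        return 0;
    }

Usage would be along the lines of running such a filter over blkfront.c and diffing the output against the original to confirm that only whitespace changed.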