ia64/xen-unstable

changeset 7017:d7c794130ac5

Indentation cleanups in Linux driver code.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Sep 22 14:04:14 2005 +0100 (2005-09-22)
parents 4cff74aa6246
children ecc77b1c8612
files	linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
	linux-2.6-xen-sparse/drivers/xen/blkback/common.h
	linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
	linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c
	linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c
	linux-2.6-xen-sparse/drivers/xen/blkfront/block.h
	linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c
	linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
	linux-2.6-xen-sparse/drivers/xen/blktap/common.h
	linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c
	linux-2.6-xen-sparse/drivers/xen/console/console.c
	linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c
	linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h
	linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c
	linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c
	linux-2.6-xen-sparse/drivers/xen/tpmback/common.h
	linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c
	linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c
	linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c
	linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h
	linux-2.6-xen-sparse/drivers/xen/usbback/control.c
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c
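
The change is mechanical throughout: four-space indentation, padded parentheses ("if ( x )"), and braces on their own lines become standard Linux kernel style, i.e. hard tabs, "if (x)", and K&R brace placement, and each touched file gains an Emacs "Local variables" block selecting the "linux" C style. A minimal before/after sketch of the conversion (hypothetical function, not taken from the patch):

	/* Before: old Xen driver style (4 spaces, padded parens,
	 * braces on their own lines). */
	static int demo(int x)
	{
	    if ( x != 0 )
	    {
	        x++;
	    }
	    return x;
	}

	/* After: Linux kernel style (tabs, K&R braces, tight parens). */
	static int demo(int x)
	{
		if (x != 0)
			x++;
		return x;
	}
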
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Sep 22 14:01:01 2005 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Sep 22 14:04:14 2005 +0100
     1.3 @@ -28,12 +28,12 @@
     1.4  #define BATCH_PER_DOMAIN 16
     1.5  
     1.6  static unsigned long mmap_vstart;
     1.7 -#define MMAP_PAGES                                              \
     1.8 -    (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
     1.9 -#define MMAP_VADDR(_req,_seg)                                   \
    1.10 -    (mmap_vstart +                                              \
    1.11 -     ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +    \
    1.12 -     ((_seg) * PAGE_SIZE))
    1.13 +#define MMAP_PAGES						\
    1.14 +	(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
    1.15 +#define MMAP_VADDR(_req,_seg)						\
    1.16 +	(mmap_vstart +							\
    1.17 +	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
    1.18 +	 ((_seg) * PAGE_SIZE))
    1.19  
    1.20  /*
    1.21   * Each outstanding request that we've passed to the lower device layers has a 
    1.22 @@ -42,12 +42,12 @@ static unsigned long mmap_vstart;
    1.23   * response queued for it, with the saved 'id' passed back.
    1.24   */
    1.25  typedef struct {
    1.26 -    blkif_t       *blkif;
    1.27 -    unsigned long  id;
    1.28 -    int            nr_pages;
    1.29 -    atomic_t       pendcnt;
    1.30 -    unsigned short operation;
    1.31 -    int            status;
    1.32 +	blkif_t       *blkif;
    1.33 +	unsigned long  id;
    1.34 +	int            nr_pages;
    1.35 +	atomic_t       pendcnt;
    1.36 +	unsigned short operation;
    1.37 +	int            status;
    1.38  } pending_req_t;
    1.39  
    1.40  /*
    1.41 @@ -68,14 +68,13 @@ static PEND_RING_IDX pending_prod, pendi
    1.42  static request_queue_t *plugged_queue;
    1.43  static inline void flush_plugged_queue(void)
    1.44  {
    1.45 -    request_queue_t *q = plugged_queue;
    1.46 -    if ( q != NULL )
    1.47 -    {
    1.48 -        if ( q->unplug_fn != NULL )
    1.49 -            q->unplug_fn(q);
    1.50 -        blk_put_queue(q);
    1.51 -        plugged_queue = NULL;
    1.52 -    }
    1.53 +	request_queue_t *q = plugged_queue;
    1.54 +	if (q != NULL) {
    1.55 +		if ( q->unplug_fn != NULL )
    1.56 +			q->unplug_fn(q);
    1.57 +		blk_put_queue(q);
    1.58 +		plugged_queue = NULL;
    1.59 +	}
    1.60  }
    1.61  
    1.62  /* When using grant tables to map a frame for device access then the
    1.63 @@ -106,24 +105,23 @@ static void make_response(blkif_t *blkif
    1.64  
    1.65  static void fast_flush_area(int idx, int nr_pages)
    1.66  {
    1.67 -    struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    1.68 -    unsigned int i, invcount = 0;
    1.69 -    u16 handle;
    1.70 +	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    1.71 +	unsigned int i, invcount = 0;
    1.72 +	u16 handle;
    1.73  
    1.74 -    for ( i = 0; i < nr_pages; i++ )
    1.75 -    {
    1.76 -        if ( BLKBACK_INVALID_HANDLE != ( handle = pending_handle(idx, i) ) )
    1.77 -        {
    1.78 -            unmap[i].host_addr      = MMAP_VADDR(idx, i);
    1.79 -            unmap[i].dev_bus_addr   = 0;
    1.80 -            unmap[i].handle         = handle;
    1.81 -            pending_handle(idx, i)  = BLKBACK_INVALID_HANDLE;
    1.82 -            invcount++;
    1.83 -        }
    1.84 -    }
    1.85 -    if ( unlikely(HYPERVISOR_grant_table_op(
    1.86 -                    GNTTABOP_unmap_grant_ref, unmap, invcount)))
    1.87 -        BUG();
    1.88 +	for (i = 0; i < nr_pages; i++) {
    1.89 +		handle = pending_handle(idx, i);
    1.90 +		if (handle == BLKBACK_INVALID_HANDLE)
    1.91 +			continue;
    1.92 +		unmap[i].host_addr      = MMAP_VADDR(idx, i);
    1.93 +		unmap[i].dev_bus_addr   = 0;
    1.94 +		unmap[i].handle         = handle;
    1.95 +		pending_handle(idx, i)  = BLKBACK_INVALID_HANDLE;
    1.96 +		invcount++;
    1.97 +	}
    1.98 +
    1.99 +	BUG_ON(HYPERVISOR_grant_table_op(
   1.100 +		GNTTABOP_unmap_grant_ref, unmap, invcount));
   1.101  }
   1.102  
   1.103  
   1.104 @@ -136,34 +134,38 @@ static spinlock_t blkio_schedule_list_lo
   1.105  
   1.106  static int __on_blkdev_list(blkif_t *blkif)
   1.107  {
   1.108 -    return blkif->blkdev_list.next != NULL;
   1.109 +	return blkif->blkdev_list.next != NULL;
   1.110  }
   1.111  
   1.112  static void remove_from_blkdev_list(blkif_t *blkif)
   1.113  {
   1.114 -    unsigned long flags;
   1.115 -    if ( !__on_blkdev_list(blkif) ) return;
   1.116 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   1.117 -    if ( __on_blkdev_list(blkif) )
   1.118 -    {
   1.119 -        list_del(&blkif->blkdev_list);
   1.120 -        blkif->blkdev_list.next = NULL;
   1.121 -        blkif_put(blkif);
   1.122 -    }
   1.123 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   1.124 +	unsigned long flags;
   1.125 +
   1.126 +	if (!__on_blkdev_list(blkif))
   1.127 +		return;
   1.128 +
   1.129 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   1.130 +	if (__on_blkdev_list(blkif)) {
   1.131 +		list_del(&blkif->blkdev_list);
   1.132 +		blkif->blkdev_list.next = NULL;
   1.133 +		blkif_put(blkif);
   1.134 +	}
   1.135 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   1.136  }
   1.137  
   1.138  static void add_to_blkdev_list_tail(blkif_t *blkif)
   1.139  {
   1.140 -    unsigned long flags;
   1.141 -    if ( __on_blkdev_list(blkif) ) return;
   1.142 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   1.143 -    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
   1.144 -    {
   1.145 -        list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
   1.146 -        blkif_get(blkif);
   1.147 -    }
   1.148 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   1.149 +	unsigned long flags;
   1.150 +
   1.151 +	if (__on_blkdev_list(blkif))
   1.152 +		return;
   1.153 +
   1.154 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   1.155 +	if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
   1.156 +		list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
   1.157 +		blkif_get(blkif);
   1.158 +	}
   1.159 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   1.160  }
   1.161  
   1.162  
   1.163 @@ -175,54 +177,53 @@ static DECLARE_WAIT_QUEUE_HEAD(blkio_sch
   1.164  
   1.165  static int blkio_schedule(void *arg)
   1.166  {
   1.167 -    DECLARE_WAITQUEUE(wq, current);
   1.168 +	DECLARE_WAITQUEUE(wq, current);
   1.169  
   1.170 -    blkif_t          *blkif;
   1.171 -    struct list_head *ent;
   1.172 +	blkif_t          *blkif;
   1.173 +	struct list_head *ent;
   1.174  
   1.175 -    daemonize("xenblkd");
   1.176 +	daemonize("xenblkd");
   1.177  
   1.178 -    for ( ; ; )
   1.179 -    {
   1.180 -        /* Wait for work to do. */
   1.181 -        add_wait_queue(&blkio_schedule_wait, &wq);
   1.182 -        set_current_state(TASK_INTERRUPTIBLE);
   1.183 -        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
   1.184 -             list_empty(&blkio_schedule_list) )
   1.185 -            schedule();
   1.186 -        __set_current_state(TASK_RUNNING);
   1.187 -        remove_wait_queue(&blkio_schedule_wait, &wq);
   1.188 +	for (;;) {
   1.189 +		/* Wait for work to do. */
   1.190 +		add_wait_queue(&blkio_schedule_wait, &wq);
   1.191 +		set_current_state(TASK_INTERRUPTIBLE);
   1.192 +		if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
   1.193 +		     list_empty(&blkio_schedule_list) )
   1.194 +			schedule();
   1.195 +		__set_current_state(TASK_RUNNING);
   1.196 +		remove_wait_queue(&blkio_schedule_wait, &wq);
   1.197  
   1.198 -        /* Queue up a batch of requests. */
   1.199 -        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
   1.200 -                !list_empty(&blkio_schedule_list) )
   1.201 -        {
   1.202 -            ent = blkio_schedule_list.next;
   1.203 -            blkif = list_entry(ent, blkif_t, blkdev_list);
   1.204 -            blkif_get(blkif);
   1.205 -            remove_from_blkdev_list(blkif);
   1.206 -            if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
   1.207 -                add_to_blkdev_list_tail(blkif);
   1.208 -            blkif_put(blkif);
   1.209 -        }
   1.210 +		/* Queue up a batch of requests. */
   1.211 +		while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
   1.212 +		       !list_empty(&blkio_schedule_list)) {
   1.213 +			ent = blkio_schedule_list.next;
   1.214 +			blkif = list_entry(ent, blkif_t, blkdev_list);
   1.215 +			blkif_get(blkif);
   1.216 +			remove_from_blkdev_list(blkif);
   1.217 +			if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
   1.218 +				add_to_blkdev_list_tail(blkif);
   1.219 +			blkif_put(blkif);
   1.220 +		}
   1.221  
   1.222 -        /* Push the batch through to disc. */
   1.223 -        flush_plugged_queue();
   1.224 -    }
   1.225 +		/* Push the batch through to disc. */
   1.226 +		flush_plugged_queue();
   1.227 +	}
   1.228  }
   1.229  
   1.230  static void maybe_trigger_blkio_schedule(void)
   1.231  {
   1.232 -    /*
   1.233 -     * Needed so that two processes, who together make the following predicate
   1.234 -     * true, don't both read stale values and evaluate the predicate
   1.235 -     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
   1.236 -     */
   1.237 -    smp_mb();
   1.238 +	/*
   1.239 +	 * Needed so that two processes, which together make the following
   1.240 +	 * predicate true, don't both read stale values and evaluate the
   1.241 +	 * predicate incorrectly. Incredibly unlikely to stall the scheduler
   1.242 +	 * on x86, but...
   1.243 +	 */
   1.244 +	smp_mb();
   1.245  
   1.246 -    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   1.247 -         !list_empty(&blkio_schedule_list) )
   1.248 -        wake_up(&blkio_schedule_wait);
   1.249 +	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   1.250 +	    !list_empty(&blkio_schedule_list))
   1.251 +		wake_up(&blkio_schedule_wait);
   1.252  }
   1.253  
   1.254  
   1.255 @@ -233,36 +234,34 @@ static void maybe_trigger_blkio_schedule
   1.256  
   1.257  static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
   1.258  {
   1.259 -    unsigned long flags;
   1.260 +	unsigned long flags;
   1.261  
   1.262 -    /* An error fails the entire request. */
   1.263 -    if ( !uptodate )
   1.264 -    {
   1.265 -        DPRINTK("Buffer not up-to-date at end of operation\n");
   1.266 -        pending_req->status = BLKIF_RSP_ERROR;
   1.267 -    }
   1.268 +	/* An error fails the entire request. */
   1.269 +	if (!uptodate) {
   1.270 +		DPRINTK("Buffer not up-to-date at end of operation\n");
   1.271 +		pending_req->status = BLKIF_RSP_ERROR;
   1.272 +	}
   1.273  
   1.274 -    if ( atomic_dec_and_test(&pending_req->pendcnt) )
   1.275 -    {
   1.276 -        int pending_idx = pending_req - pending_reqs;
   1.277 -        fast_flush_area(pending_idx, pending_req->nr_pages);
   1.278 -        make_response(pending_req->blkif, pending_req->id,
   1.279 -                      pending_req->operation, pending_req->status);
   1.280 -        blkif_put(pending_req->blkif);
   1.281 -        spin_lock_irqsave(&pend_prod_lock, flags);
   1.282 -        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   1.283 -        spin_unlock_irqrestore(&pend_prod_lock, flags);
   1.284 -        maybe_trigger_blkio_schedule();
   1.285 -    }
   1.286 +	if (atomic_dec_and_test(&pending_req->pendcnt)) {
   1.287 +		int pending_idx = pending_req - pending_reqs;
   1.288 +		fast_flush_area(pending_idx, pending_req->nr_pages);
   1.289 +		make_response(pending_req->blkif, pending_req->id,
   1.290 +			      pending_req->operation, pending_req->status);
   1.291 +		blkif_put(pending_req->blkif);
   1.292 +		spin_lock_irqsave(&pend_prod_lock, flags);
   1.293 +		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   1.294 +		spin_unlock_irqrestore(&pend_prod_lock, flags);
   1.295 +		maybe_trigger_blkio_schedule();
   1.296 +	}
   1.297  }
   1.298  
   1.299  static int end_block_io_op(struct bio *bio, unsigned int done, int error)
   1.300  {
   1.301 -    if ( bio->bi_size != 0 )
   1.302 -        return 1;
   1.303 -    __end_block_io_op(bio->bi_private, !error);
   1.304 -    bio_put(bio);
   1.305 -    return error;
   1.306 +	if (bio->bi_size != 0)
   1.307 +		return 1;
   1.308 +	__end_block_io_op(bio->bi_private, !error);
   1.309 +	bio_put(bio);
   1.310 +	return error;
   1.311  }
   1.312  
   1.313  
   1.314 @@ -272,10 +271,10 @@ static int end_block_io_op(struct bio *b
   1.315  
   1.316  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
   1.317  {
   1.318 -    blkif_t *blkif = dev_id;
   1.319 -    add_to_blkdev_list_tail(blkif);
   1.320 -    maybe_trigger_blkio_schedule();
   1.321 -    return IRQ_HANDLED;
   1.322 +	blkif_t *blkif = dev_id;
   1.323 +	add_to_blkdev_list_tail(blkif);
   1.324 +	maybe_trigger_blkio_schedule();
   1.325 +	return IRQ_HANDLED;
   1.326  }
   1.327  
   1.328  
   1.329 @@ -286,183 +285,174 @@ irqreturn_t blkif_be_int(int irq, void *
   1.330  
   1.331  static int do_block_io_op(blkif_t *blkif, int max_to_do)
   1.332  {
   1.333 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   1.334 -    blkif_request_t *req;
   1.335 -    RING_IDX i, rp;
   1.336 -    int more_to_do = 0;
   1.337 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   1.338 +	blkif_request_t *req;
   1.339 +	RING_IDX i, rp;
   1.340 +	int more_to_do = 0;
   1.341  
   1.342 -    rp = blk_ring->sring->req_prod;
   1.343 -    rmb(); /* Ensure we see queued requests up to 'rp'. */
   1.344 +	rp = blk_ring->sring->req_prod;
   1.345 +	rmb(); /* Ensure we see queued requests up to 'rp'. */
   1.346  
   1.347 -    for ( i = blk_ring->req_cons; 
   1.348 -         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
   1.349 -          i++ )
   1.350 -    {
   1.351 -        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
   1.352 -        {
   1.353 -            more_to_do = 1;
   1.354 -            break;
   1.355 -        }
   1.356 +	for (i = blk_ring->req_cons; 
   1.357 +	     (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
   1.358 +	     i++) {
   1.359 +		if ((max_to_do-- == 0) ||
   1.360 +		    (NR_PENDING_REQS == MAX_PENDING_REQS)) {
   1.361 +			more_to_do = 1;
   1.362 +			break;
   1.363 +		}
   1.364          
   1.365 -        req = RING_GET_REQUEST(blk_ring, i);
   1.366 -        switch ( req->operation )
   1.367 -        {
   1.368 -        case BLKIF_OP_READ:
   1.369 -        case BLKIF_OP_WRITE:
   1.370 -            dispatch_rw_block_io(blkif, req);
   1.371 -            break;
   1.372 +		req = RING_GET_REQUEST(blk_ring, i);
   1.373 +		switch (req->operation) {
   1.374 +		case BLKIF_OP_READ:
   1.375 +		case BLKIF_OP_WRITE:
   1.376 +			dispatch_rw_block_io(blkif, req);
   1.377 +			break;
   1.378  
   1.379 -        default:
   1.380 -            DPRINTK("error: unknown block io operation [%d]\n",
   1.381 -                    req->operation);
   1.382 -            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   1.383 -            break;
   1.384 -        }
   1.385 -    }
   1.386 +		default:
   1.387 +			DPRINTK("error: unknown block io operation [%d]\n",
   1.388 +				req->operation);
   1.389 +			make_response(blkif, req->id, req->operation,
   1.390 +				      BLKIF_RSP_ERROR);
   1.391 +			break;
   1.392 +		}
   1.393 +	}
   1.394  
   1.395 -    blk_ring->req_cons = i;
   1.396 -    return more_to_do;
   1.397 +	blk_ring->req_cons = i;
   1.398 +	return more_to_do;
   1.399  }
   1.400  
   1.401  static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
   1.402  {
   1.403 -    extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
   1.404 -    int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
   1.405 -    unsigned long fas = 0;
   1.406 -    int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   1.407 -    pending_req_t *pending_req;
   1.408 -    struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.409 -    struct phys_req preq;
   1.410 -    struct { 
   1.411 -        unsigned long buf; unsigned int nsec;
   1.412 -    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.413 -    unsigned int nseg;
   1.414 -    struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.415 -    int nbio = 0;
   1.416 -    request_queue_t *q;
   1.417 +	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
   1.418 +	int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
   1.419 +	unsigned long fas = 0;
   1.420 +	int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   1.421 +	pending_req_t *pending_req;
   1.422 +	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.423 +	struct phys_req preq;
   1.424 +	struct { 
   1.425 +		unsigned long buf; unsigned int nsec;
   1.426 +	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.427 +	unsigned int nseg;
   1.428 +	struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.429 +	int nbio = 0;
   1.430 +	request_queue_t *q;
   1.431  
   1.432 -    /* Check that number of segments is sane. */
   1.433 -    nseg = req->nr_segments;
   1.434 -    if ( unlikely(nseg == 0) || 
   1.435 -         unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
   1.436 -    {
   1.437 -        DPRINTK("Bad number of segments in request (%d)\n", nseg);
   1.438 -        goto bad_descriptor;
   1.439 -    }
   1.440 +	/* Check that number of segments is sane. */
   1.441 +	nseg = req->nr_segments;
   1.442 +	if (unlikely(nseg == 0) || 
   1.443 +	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
   1.444 +		DPRINTK("Bad number of segments in request (%d)\n", nseg);
   1.445 +		goto bad_descriptor;
   1.446 +	}
   1.447  
   1.448 -    preq.dev           = req->handle;
   1.449 -    preq.sector_number = req->sector_number;
   1.450 -    preq.nr_sects      = 0;
   1.451 +	preq.dev           = req->handle;
   1.452 +	preq.sector_number = req->sector_number;
   1.453 +	preq.nr_sects      = 0;
   1.454  
   1.455 -    for ( i = 0; i < nseg; i++ )
   1.456 -    {
   1.457 -        fas         = req->frame_and_sects[i];
   1.458 -        seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
   1.459 +	for (i = 0; i < nseg; i++) {
   1.460 +		fas         = req->frame_and_sects[i];
   1.461 +		seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
   1.462 +
   1.463 +		if (seg[i].nsec <= 0)
   1.464 +			goto bad_descriptor;
   1.465 +		preq.nr_sects += seg[i].nsec;
   1.466  
   1.467 -        if ( seg[i].nsec <= 0 )
   1.468 -            goto bad_descriptor;
   1.469 -        preq.nr_sects += seg[i].nsec;
   1.470 -
   1.471 -        map[i].host_addr = MMAP_VADDR(pending_idx, i);
   1.472 -        map[i].dom = blkif->domid;
   1.473 -        map[i].ref = blkif_gref_from_fas(fas);
   1.474 -        map[i].flags = GNTMAP_host_map;
   1.475 -        if ( operation == WRITE )
   1.476 -            map[i].flags |= GNTMAP_readonly;
   1.477 -    }
   1.478 +		map[i].host_addr = MMAP_VADDR(pending_idx, i);
   1.479 +		map[i].dom = blkif->domid;
   1.480 +		map[i].ref = blkif_gref_from_fas(fas);
   1.481 +		map[i].flags = GNTMAP_host_map;
   1.482 +		if ( operation == WRITE )
   1.483 +			map[i].flags |= GNTMAP_readonly;
   1.484 +	}
   1.485  
   1.486 -    if ( unlikely(HYPERVISOR_grant_table_op(
   1.487 -                    GNTTABOP_map_grant_ref, map, nseg)))
   1.488 -        BUG();
   1.489 +	BUG_ON(HYPERVISOR_grant_table_op(
   1.490 +		GNTTABOP_map_grant_ref, map, nseg));
   1.491  
   1.492 -    for ( i = 0; i < nseg; i++ )
   1.493 -    {
   1.494 -        if ( unlikely(map[i].handle < 0) )
   1.495 -        {
   1.496 -            DPRINTK("invalid buffer -- could not remap it\n");
   1.497 -            fast_flush_area(pending_idx, nseg);
   1.498 -            goto bad_descriptor;
   1.499 -        }
   1.500 +	for (i = 0; i < nseg; i++) {
   1.501 +		if (unlikely(map[i].handle < 0)) {
   1.502 +			DPRINTK("invalid buffer -- could not remap it\n");
   1.503 +			fast_flush_area(pending_idx, nseg);
   1.504 +			goto bad_descriptor;
   1.505 +		}
   1.506  
   1.507 -        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
   1.508 -            FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
   1.509 +		phys_to_machine_mapping[__pa(MMAP_VADDR(
   1.510 +			pending_idx, i)) >> PAGE_SHIFT] =
   1.511 +			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
   1.512  
   1.513 -        pending_handle(pending_idx, i) = map[i].handle;
   1.514 -    }
   1.515 +		pending_handle(pending_idx, i) = map[i].handle;
   1.516 +	}
   1.517  
   1.518 -    for ( i = 0; i < nseg; i++ )
   1.519 -    {
   1.520 -        fas         = req->frame_and_sects[i];
   1.521 -        seg[i].buf  = map[i].dev_bus_addr | (blkif_first_sect(fas) << 9);
   1.522 -    }
   1.523 +	for (i = 0; i < nseg; i++) {
   1.524 +		fas         = req->frame_and_sects[i];
   1.525 +		seg[i].buf  = map[i].dev_bus_addr | 
   1.526 +			(blkif_first_sect(fas) << 9);
   1.527 +	}
   1.528  
   1.529 -    if ( vbd_translate(&preq, blkif, operation) != 0 )
   1.530 -    {
   1.531 -        DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
   1.532 -                operation == READ ? "read" : "write", preq.sector_number,
   1.533 -                preq.sector_number + preq.nr_sects, preq.dev); 
   1.534 -        goto bad_descriptor;
   1.535 -    }
   1.536 +	if (vbd_translate(&preq, blkif, operation) != 0) {
   1.537 +		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
   1.538 +			operation == READ ? "read" : "write",
   1.539 +			preq.sector_number,
   1.540 +			preq.sector_number + preq.nr_sects, preq.dev); 
   1.541 +		goto bad_descriptor;
   1.542 +	}
   1.543  
   1.544 -    pending_req = &pending_reqs[pending_idx];
   1.545 -    pending_req->blkif     = blkif;
   1.546 -    pending_req->id        = req->id;
   1.547 -    pending_req->operation = operation;
   1.548 -    pending_req->status    = BLKIF_RSP_OKAY;
   1.549 -    pending_req->nr_pages  = nseg;
   1.550 +	pending_req = &pending_reqs[pending_idx];
   1.551 +	pending_req->blkif     = blkif;
   1.552 +	pending_req->id        = req->id;
   1.553 +	pending_req->operation = operation;
   1.554 +	pending_req->status    = BLKIF_RSP_OKAY;
   1.555 +	pending_req->nr_pages  = nseg;
   1.556  
   1.557 -    for ( i = 0; i < nseg; i++ )
   1.558 -    {
   1.559 -        if ( ((int)preq.sector_number|(int)seg[i].nsec) &
   1.560 -             ((bdev_hardsect_size(preq.bdev) >> 9) - 1) )
   1.561 -        {
   1.562 -            DPRINTK("Misaligned I/O request from domain %d", blkif->domid);
   1.563 -            goto cleanup_and_fail;
   1.564 -        }
   1.565 +	for (i = 0; i < nseg; i++) {
   1.566 +		if (((int)preq.sector_number|(int)seg[i].nsec) &
   1.567 +		    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
   1.568 +			DPRINTK("Misaligned I/O request from domain %d",
   1.569 +				blkif->domid);
   1.570 +			goto cleanup_and_fail;
   1.571 +		}
   1.572  
   1.573 -        while ( (bio == NULL) ||
   1.574 -                (bio_add_page(bio,
   1.575 -                              virt_to_page(MMAP_VADDR(pending_idx, i)),
   1.576 -                              seg[i].nsec << 9,
   1.577 -                              seg[i].buf & ~PAGE_MASK) == 0) )
   1.578 -        {
   1.579 -            bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
   1.580 -            if ( unlikely(bio == NULL) )
   1.581 -            {
   1.582 -            cleanup_and_fail:
   1.583 -                for ( i = 0; i < (nbio-1); i++ )
   1.584 -                    bio_put(biolist[i]);
   1.585 -                fast_flush_area(pending_idx, nseg);
   1.586 -                goto bad_descriptor;
   1.587 -            }
   1.588 +		while ((bio == NULL) ||
   1.589 +		       (bio_add_page(bio,
   1.590 +				     virt_to_page(MMAP_VADDR(pending_idx, i)),
   1.591 +				     seg[i].nsec << 9,
   1.592 +				     seg[i].buf & ~PAGE_MASK) == 0)) {
   1.593 +			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
   1.594 +			if (unlikely(bio == NULL)) {
   1.595 +			cleanup_and_fail:
   1.596 +				for (i = 0; i < (nbio-1); i++)
   1.597 +					bio_put(biolist[i]);
   1.598 +				fast_flush_area(pending_idx, nseg);
   1.599 +				goto bad_descriptor;
   1.600 +			}
   1.601                  
   1.602 -            bio->bi_bdev    = preq.bdev;
   1.603 -            bio->bi_private = pending_req;
   1.604 -            bio->bi_end_io  = end_block_io_op;
   1.605 -            bio->bi_sector  = preq.sector_number;
   1.606 -        }
   1.607 +			bio->bi_bdev    = preq.bdev;
   1.608 +			bio->bi_private = pending_req;
   1.609 +			bio->bi_end_io  = end_block_io_op;
   1.610 +			bio->bi_sector  = preq.sector_number;
   1.611 +		}
   1.612  
   1.613 -        preq.sector_number += seg[i].nsec;
   1.614 -    }
   1.615 +		preq.sector_number += seg[i].nsec;
   1.616 +	}
   1.617  
   1.618 -    if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
   1.619 -    {
   1.620 -        flush_plugged_queue();
   1.621 -        blk_get_queue(q);
   1.622 -        plugged_queue = q;
   1.623 -    }
   1.624 +	if ((q = bdev_get_queue(bio->bi_bdev)) != plugged_queue) {
   1.625 +		flush_plugged_queue();
   1.626 +		blk_get_queue(q);
   1.627 +		plugged_queue = q;
   1.628 +	}
   1.629  
   1.630 -    atomic_set(&pending_req->pendcnt, nbio);
   1.631 -    pending_cons++;
   1.632 -    blkif_get(blkif);
   1.633 +	atomic_set(&pending_req->pendcnt, nbio);
   1.634 +	pending_cons++;
   1.635 +	blkif_get(blkif);
   1.636  
   1.637 -    for ( i = 0; i < nbio; i++ )
   1.638 -        submit_bio(operation, biolist[i]);
   1.639 +	for (i = 0; i < nbio; i++)
   1.640 +		submit_bio(operation, biolist[i]);
   1.641  
   1.642 -    return;
   1.643 +	return;
   1.644  
   1.645   bad_descriptor:
   1.646 -    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   1.647 +	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   1.648  } 
   1.649  
   1.650  
   1.651 @@ -475,66 +465,71 @@ static void dispatch_rw_block_io(blkif_t
   1.652  static void make_response(blkif_t *blkif, unsigned long id, 
   1.653                            unsigned short op, int st)
   1.654  {
   1.655 -    blkif_response_t *resp;
   1.656 -    unsigned long     flags;
   1.657 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   1.658 +	blkif_response_t *resp;
   1.659 +	unsigned long     flags;
   1.660 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   1.661  
   1.662 -    /* Place on the response ring for the relevant domain. */ 
   1.663 -    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
   1.664 -    resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
   1.665 -    resp->id        = id;
   1.666 -    resp->operation = op;
   1.667 -    resp->status    = st;
   1.668 -    wmb(); /* Ensure other side can see the response fields. */
   1.669 -    blk_ring->rsp_prod_pvt++;
   1.670 -    RING_PUSH_RESPONSES(blk_ring);
   1.671 -    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
   1.672 +	/* Place on the response ring for the relevant domain. */ 
   1.673 +	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
   1.674 +	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
   1.675 +	resp->id        = id;
   1.676 +	resp->operation = op;
   1.677 +	resp->status    = st;
   1.678 +	wmb(); /* Ensure other side can see the response fields. */
   1.679 +	blk_ring->rsp_prod_pvt++;
   1.680 +	RING_PUSH_RESPONSES(blk_ring);
   1.681 +	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
   1.682  
   1.683 -    /* Kick the relevant domain. */
   1.684 -    notify_via_evtchn(blkif->evtchn);
   1.685 +	/* Kick the relevant domain. */
   1.686 +	notify_via_evtchn(blkif->evtchn);
   1.687  }
   1.688  
   1.689  void blkif_deschedule(blkif_t *blkif)
   1.690  {
   1.691 -    remove_from_blkdev_list(blkif);
   1.692 +	remove_from_blkdev_list(blkif);
   1.693  }
   1.694  
   1.695  static int __init blkif_init(void)
   1.696  {
   1.697 -    int i;
   1.698 -    struct page *page;
   1.699 +	int i;
   1.700 +	struct page *page;
   1.701  
   1.702 -    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
   1.703 -         !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
   1.704 -        return 0;
   1.705 +	if (!(xen_start_info->flags & SIF_INITDOMAIN) &&
   1.706 +	    !(xen_start_info->flags & SIF_BLK_BE_DOMAIN))
   1.707 +		return 0;
   1.708  
   1.709 -    blkif_interface_init();
   1.710 +	blkif_interface_init();
   1.711  
   1.712 -    page = balloon_alloc_empty_page_range(MMAP_PAGES);
   1.713 -    BUG_ON(page == NULL);
   1.714 -    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
   1.715 +	page = balloon_alloc_empty_page_range(MMAP_PAGES);
   1.716 +	BUG_ON(page == NULL);
   1.717 +	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
   1.718  
   1.719 -    pending_cons = 0;
   1.720 -    pending_prod = MAX_PENDING_REQS;
   1.721 -    memset(pending_reqs, 0, sizeof(pending_reqs));
   1.722 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
   1.723 -        pending_ring[i] = i;
   1.724 +	pending_cons = 0;
   1.725 +	pending_prod = MAX_PENDING_REQS;
   1.726 +	memset(pending_reqs, 0, sizeof(pending_reqs));
   1.727 +	for (i = 0; i < MAX_PENDING_REQS; i++)
   1.728 +		pending_ring[i] = i;
   1.729      
   1.730 -    spin_lock_init(&blkio_schedule_list_lock);
   1.731 -    INIT_LIST_HEAD(&blkio_schedule_list);
   1.732 +	spin_lock_init(&blkio_schedule_list_lock);
   1.733 +	INIT_LIST_HEAD(&blkio_schedule_list);
   1.734  
   1.735 -    if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
   1.736 -        BUG();
   1.737 +	BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
   1.738  
   1.739 -    blkif_xenbus_init();
   1.740 +	blkif_xenbus_init();
   1.741  
   1.742 -    memset( pending_grant_handles,  BLKBACK_INVALID_HANDLE, MMAP_PAGES );
   1.743 +	memset(pending_grant_handles,  BLKBACK_INVALID_HANDLE, MMAP_PAGES);
   1.744  
   1.745 -#ifdef CONFIG_XEN_BLKDEV_TAP_BE
   1.746 -    printk(KERN_ALERT "NOTE: Blkif backend is running with tap support on!\n");
   1.747 -#endif
   1.748 -
   1.749 -    return 0;
   1.750 +	return 0;
   1.751  }
   1.752  
   1.753  __initcall(blkif_init);
   1.754 +
   1.755 +/*
   1.756 + * Local variables:
   1.757 + *  c-file-style: "linux"
   1.758 + *  indent-tabs-mode: t
   1.759 + *  c-indent-level: 8
   1.760 + *  c-basic-offset: 8
   1.761 + *  tab-width: 8
   1.762 + * End:
   1.763 + */
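
Two non-whitespace simplifications in blkback.c above recur in later files: nested conditionals are flattened with an early continue, and open-coded "if (unlikely(...)) BUG();" checks collapse into BUG_ON(). A sketch of both patterns (hypothetical handles[] array, INVALID sentinel, and do_unmap() helper, not the driver's real state):

	/* Before: nested test, explicit BUG(). */
	for (i = 0; i < n; i++) {
		if (handles[i] != INVALID) {
			unmap[count].handle = handles[i];
			count++;
		}
	}
	if (unlikely(do_unmap(unmap, count)))
		BUG();

	/* After: invert the test and continue early;
	 * BUG_ON() states the invariant in one line. */
	for (i = 0; i < n; i++) {
		if (handles[i] == INVALID)
			continue;
		unmap[count].handle = handles[i];
		count++;
	}
	BUG_ON(do_unmap(unmap, count));
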
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h	Thu Sep 22 14:01:01 2005 +0100
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h	Thu Sep 22 14:04:14 2005 +0100
     2.3 @@ -31,39 +31,39 @@
     2.4  #endif
     2.5  
     2.6  struct vbd {
     2.7 -    blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
     2.8 -    unsigned char  readonly;    /* Non-zero -> read-only */
     2.9 -    unsigned char  type;        /* VDISK_xxx */
    2.10 -    u32            pdevice;     /* phys device that this vbd maps to */
    2.11 -    struct block_device *bdev;
    2.12 +	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
    2.13 +	unsigned char  readonly;    /* Non-zero -> read-only */
    2.14 +	unsigned char  type;        /* VDISK_xxx */
    2.15 +	u32            pdevice;     /* phys device that this vbd maps to */
    2.16 +	struct block_device *bdev;
    2.17  }; 
    2.18  
    2.19  typedef struct blkif_st {
    2.20 -    /* Unique identifier for this interface. */
    2.21 -    domid_t           domid;
    2.22 -    unsigned int      handle;
    2.23 -    /* Physical parameters of the comms window. */
    2.24 -    unsigned int      evtchn;
    2.25 -    unsigned int      remote_evtchn;
    2.26 -    /* Comms information. */
    2.27 -    blkif_back_ring_t blk_ring;
    2.28 -    struct vm_struct *blk_ring_area;
    2.29 -    /* VBDs attached to this interface. */
    2.30 -    struct vbd        vbd;
    2.31 -    /* Private fields. */
    2.32 -    enum { DISCONNECTED, CONNECTED } status;
    2.33 +	/* Unique identifier for this interface. */
    2.34 +	domid_t           domid;
    2.35 +	unsigned int      handle;
    2.36 +	/* Physical parameters of the comms window. */
    2.37 +	unsigned int      evtchn;
    2.38 +	unsigned int      remote_evtchn;
    2.39 +	/* Comms information. */
    2.40 +	blkif_back_ring_t blk_ring;
    2.41 +	struct vm_struct *blk_ring_area;
    2.42 +	/* VBDs attached to this interface. */
    2.43 +	struct vbd        vbd;
    2.44 +	/* Private fields. */
    2.45 +	enum { DISCONNECTED, CONNECTED } status;
    2.46  #ifdef CONFIG_XEN_BLKDEV_TAP_BE
    2.47 -    /* Is this a blktap frontend */
    2.48 -    unsigned int     is_blktap;
    2.49 +	/* Is this a blktap frontend */
    2.50 +	unsigned int     is_blktap;
    2.51  #endif
    2.52 -    struct list_head blkdev_list;
    2.53 -    spinlock_t       blk_ring_lock;
    2.54 -    atomic_t         refcnt;
    2.55 +	struct list_head blkdev_list;
    2.56 +	spinlock_t       blk_ring_lock;
    2.57 +	atomic_t         refcnt;
    2.58  
    2.59 -    struct work_struct free_work;
    2.60 +	struct work_struct free_work;
    2.61  
    2.62 -    u16         shmem_handle;
    2.63 -    grant_ref_t shmem_ref;
    2.64 +	u16         shmem_handle;
    2.65 +	grant_ref_t shmem_ref;
    2.66  } blkif_t;
    2.67  
    2.68  blkif_t *alloc_blkif(domid_t domid);
    2.69 @@ -71,11 +71,11 @@ void free_blkif_callback(blkif_t *blkif)
    2.70  int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
    2.71  
    2.72  #define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
    2.73 -#define blkif_put(_b)                             \
    2.74 -    do {                                          \
    2.75 -        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
    2.76 -            free_blkif_callback(_b);		  \
    2.77 -    } while (0)
    2.78 +#define blkif_put(_b)					\
    2.79 +	do {						\
    2.80 +		if (atomic_dec_and_test(&(_b)->refcnt))	\
    2.81 +			free_blkif_callback(_b);	\
    2.82 +	} while (0)
    2.83  
    2.84  /* Create a vbd. */
    2.85  int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, u32 pdevice,
    2.86 @@ -87,10 +87,10 @@ unsigned int vbd_info(struct vbd *vbd);
    2.87  unsigned long vbd_secsize(struct vbd *vbd);
    2.88  
    2.89  struct phys_req {
    2.90 -    unsigned short       dev;
    2.91 -    unsigned short       nr_sects;
    2.92 -    struct block_device *bdev;
    2.93 -    blkif_sector_t       sector_number;
    2.94 +	unsigned short       dev;
    2.95 +	unsigned short       nr_sects;
    2.96 +	struct block_device *bdev;
    2.97 +	blkif_sector_t       sector_number;
    2.98  };
    2.99  
   2.100  int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
   2.101 @@ -104,3 +104,13 @@ void blkif_xenbus_init(void);
   2.102  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
   2.103  
   2.104  #endif /* __BLKIF__BACKEND__COMMON_H__ */
   2.105 +
   2.106 +/*
   2.107 + * Local variables:
   2.108 + *  c-file-style: "linux"
   2.109 + *  indent-tabs-mode: t
   2.110 + *  c-indent-level: 8
   2.111 + *  c-basic-offset: 8
   2.112 + *  tab-width: 8
   2.113 + * End:
   2.114 + */
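
The blkif_put() macro above keeps its "do { ... } while (0)" wrapper through the reformat. That wrapper is what makes a multi-statement macro expand as a single statement, so it composes safely with if/else; a sketch of the failure it prevents (hypothetical PUT_BAD/PUT_OK macros):

	/* Unsafe: expands to two statements. */
	#define PUT_BAD(p) atomic_dec(&(p)->refcnt); free_it(p)

	if (done)
		PUT_BAD(obj);	/* only atomic_dec() is guarded by the if; */
	else			/* the stray ';' then orphans this 'else', */
		retry();	/* which is a compile error.               */

	/* Safe: one statement that still accepts a trailing ';'. */
	#define PUT_OK(p)				\
		do {					\
			atomic_dec(&(p)->refcnt);	\
			free_it(p);			\
		} while (0)
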
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Sep 22 14:01:01 2005 +0100
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Sep 22 14:04:14 2005 +0100
     3.3 @@ -13,134 +13,144 @@ static kmem_cache_t *blkif_cachep;
     3.4  
     3.5  blkif_t *alloc_blkif(domid_t domid)
     3.6  {
     3.7 -    blkif_t *blkif;
     3.8 +	blkif_t *blkif;
     3.9  
    3.10 -    blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
    3.11 -    if (!blkif)
    3.12 -	    return ERR_PTR(-ENOMEM);
    3.13 +	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
    3.14 +	if (!blkif)
    3.15 +		return ERR_PTR(-ENOMEM);
    3.16  
    3.17 -    memset(blkif, 0, sizeof(*blkif));
    3.18 -    blkif->domid = domid;
    3.19 -    blkif->status = DISCONNECTED;
    3.20 -    spin_lock_init(&blkif->blk_ring_lock);
    3.21 -    atomic_set(&blkif->refcnt, 1);
    3.22 +	memset(blkif, 0, sizeof(*blkif));
    3.23 +	blkif->domid = domid;
    3.24 +	blkif->status = DISCONNECTED;
    3.25 +	spin_lock_init(&blkif->blk_ring_lock);
    3.26 +	atomic_set(&blkif->refcnt, 1);
    3.27  
    3.28 -    return blkif;
    3.29 +	return blkif;
    3.30  }
    3.31  
    3.32  static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
    3.33  {
    3.34 -    struct gnttab_map_grant_ref op;
    3.35 +	struct gnttab_map_grant_ref op;
    3.36  
    3.37 -    op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
    3.38 -    op.flags     = GNTMAP_host_map;
    3.39 -    op.ref       = shared_page;
    3.40 -    op.dom       = blkif->domid;
    3.41 +	op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
    3.42 +	op.flags     = GNTMAP_host_map;
    3.43 +	op.ref       = shared_page;
    3.44 +	op.dom       = blkif->domid;
    3.45  
    3.46 -    lock_vm_area(blkif->blk_ring_area);
    3.47 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
    3.48 -    unlock_vm_area(blkif->blk_ring_area);
    3.49 +	lock_vm_area(blkif->blk_ring_area);
    3.50 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
    3.51 +	unlock_vm_area(blkif->blk_ring_area);
    3.52  
    3.53 -    if (op.handle < 0) {
    3.54 -	DPRINTK(" Grant table operation failure !\n");
    3.55 -	return op.handle;
    3.56 -    }
    3.57 +	if (op.handle < 0) {
    3.58 +		DPRINTK(" Grant table operation failure !\n");
    3.59 +		return op.handle;
    3.60 +	}
    3.61  
    3.62 -    blkif->shmem_ref = shared_page;
    3.63 -    blkif->shmem_handle = op.handle;
    3.64 +	blkif->shmem_ref = shared_page;
    3.65 +	blkif->shmem_handle = op.handle;
    3.66  
    3.67 -    return 0;
    3.68 +	return 0;
    3.69  }
    3.70  
    3.71  static void unmap_frontend_page(blkif_t *blkif)
    3.72  {
    3.73 -    struct gnttab_unmap_grant_ref op;
    3.74 +	struct gnttab_unmap_grant_ref op;
    3.75  
    3.76 -    op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
    3.77 -    op.handle       = blkif->shmem_handle;
    3.78 -    op.dev_bus_addr = 0;
    3.79 +	op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
    3.80 +	op.handle       = blkif->shmem_handle;
    3.81 +	op.dev_bus_addr = 0;
    3.82  
    3.83 -    lock_vm_area(blkif->blk_ring_area);
    3.84 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
    3.85 -    unlock_vm_area(blkif->blk_ring_area);
    3.86 +	lock_vm_area(blkif->blk_ring_area);
    3.87 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
    3.88 +	unlock_vm_area(blkif->blk_ring_area);
    3.89  }
    3.90  
    3.91  int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
    3.92  {
    3.93 -    blkif_sring_t *sring;
    3.94 -    evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
    3.95 -    int err;
    3.96 +	blkif_sring_t *sring;
    3.97 +	evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
    3.98 +	int err;
    3.99  
   3.100 -    BUG_ON(blkif->remote_evtchn);
   3.101 +	BUG_ON(blkif->remote_evtchn);
   3.102  
   3.103 -    if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
   3.104 -	return -ENOMEM;
   3.105 +	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
   3.106 +		return -ENOMEM;
   3.107  
   3.108 -    err = map_frontend_page(blkif, shared_page);
   3.109 -    if (err) {
   3.110 -        free_vm_area(blkif->blk_ring_area);
   3.111 -	return err;
   3.112 -    }
   3.113 +	err = map_frontend_page(blkif, shared_page);
   3.114 +	if (err) {
   3.115 +		free_vm_area(blkif->blk_ring_area);
   3.116 +		return err;
   3.117 +	}
   3.118  
   3.119 -    op.u.bind_interdomain.dom1 = DOMID_SELF;
   3.120 -    op.u.bind_interdomain.dom2 = blkif->domid;
   3.121 -    op.u.bind_interdomain.port1 = 0;
   3.122 -    op.u.bind_interdomain.port2 = evtchn;
   3.123 -    err = HYPERVISOR_event_channel_op(&op);
   3.124 -    if (err) {
   3.125 -	unmap_frontend_page(blkif);
   3.126 -        free_vm_area(blkif->blk_ring_area);
   3.127 -	return err;
   3.128 -    }
   3.129 +	op.u.bind_interdomain.dom1 = DOMID_SELF;
   3.130 +	op.u.bind_interdomain.dom2 = blkif->domid;
   3.131 +	op.u.bind_interdomain.port1 = 0;
   3.132 +	op.u.bind_interdomain.port2 = evtchn;
   3.133 +	err = HYPERVISOR_event_channel_op(&op);
   3.134 +	if (err) {
   3.135 +		unmap_frontend_page(blkif);
   3.136 +		free_vm_area(blkif->blk_ring_area);
   3.137 +		return err;
   3.138 +	}
   3.139  
   3.140 -    blkif->evtchn = op.u.bind_interdomain.port1;
   3.141 -    blkif->remote_evtchn = evtchn;
   3.142 +	blkif->evtchn = op.u.bind_interdomain.port1;
   3.143 +	blkif->remote_evtchn = evtchn;
   3.144  
   3.145 -    sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
   3.146 -    SHARED_RING_INIT(sring);
   3.147 -    BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
   3.148 +	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
   3.149 +	SHARED_RING_INIT(sring);
   3.150 +	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
   3.151  
   3.152 -    bind_evtchn_to_irqhandler(blkif->evtchn, blkif_be_int, 0, "blkif-backend",
   3.153 -			      blkif);
   3.154 -    blkif->status = CONNECTED;
   3.155 +	bind_evtchn_to_irqhandler(
   3.156 +		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
   3.157 +	blkif->status = CONNECTED;
   3.158  
   3.159 -    return 0;
   3.160 +	return 0;
   3.161  }
   3.162  
   3.163  static void free_blkif(void *arg)
   3.164  {
   3.165 -    evtchn_op_t op = { .cmd = EVTCHNOP_close };
   3.166 -    blkif_t *blkif = (blkif_t *)arg;
   3.167 +	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   3.168 +	blkif_t *blkif = (blkif_t *)arg;
   3.169  
   3.170 -    op.u.close.port = blkif->evtchn;
   3.171 -    op.u.close.dom = DOMID_SELF;
   3.172 -    HYPERVISOR_event_channel_op(&op);
   3.173 -    op.u.close.port = blkif->remote_evtchn;
   3.174 -    op.u.close.dom = blkif->domid;
   3.175 -    HYPERVISOR_event_channel_op(&op);
   3.176 +	op.u.close.port = blkif->evtchn;
   3.177 +	op.u.close.dom = DOMID_SELF;
   3.178 +	HYPERVISOR_event_channel_op(&op);
   3.179 +	op.u.close.port = blkif->remote_evtchn;
   3.180 +	op.u.close.dom = blkif->domid;
   3.181 +	HYPERVISOR_event_channel_op(&op);
   3.182  
   3.183 -    vbd_free(&blkif->vbd);
   3.184 +	vbd_free(&blkif->vbd);
   3.185  
   3.186 -    if (blkif->evtchn)
   3.187 -        unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
   3.188 +	if (blkif->evtchn)
   3.189 +		unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
   3.190  
   3.191 -    if (blkif->blk_ring.sring) {
   3.192 -	unmap_frontend_page(blkif);
   3.193 -        free_vm_area(blkif->blk_ring_area);
   3.194 -	blkif->blk_ring.sring = NULL;
   3.195 -    }
   3.196 +	if (blkif->blk_ring.sring) {
   3.197 +		unmap_frontend_page(blkif);
   3.198 +		free_vm_area(blkif->blk_ring_area);
   3.199 +		blkif->blk_ring.sring = NULL;
   3.200 +	}
   3.201  
   3.202 -    kmem_cache_free(blkif_cachep, blkif);
   3.203 +	kmem_cache_free(blkif_cachep, blkif);
   3.204  }
   3.205  
   3.206  void free_blkif_callback(blkif_t *blkif)
   3.207  {
   3.208 -    INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
   3.209 -    schedule_work(&blkif->free_work);
   3.210 +	INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
   3.211 +	schedule_work(&blkif->free_work);
   3.212  }
   3.213  
   3.214  void __init blkif_interface_init(void)
   3.215  {
   3.216 -    blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
   3.217 -                                     0, 0, NULL, NULL);
   3.218 +	blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
   3.219 +					 0, 0, NULL, NULL);
   3.220  }
   3.221 +
   3.222 +/*
   3.223 + * Local variables:
   3.224 + *  c-file-style: "linux"
   3.225 + *  indent-tabs-mode: t
   3.226 + *  c-indent-level: 8
   3.227 + *  c-basic-offset: 8
   3.228 + *  tab-width: 8
   3.229 + * End:
   3.230 + */
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c	Thu Sep 22 14:01:01 2005 +0100
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c	Thu Sep 22 14:04:14 2005 +0100
     4.3 @@ -11,10 +11,10 @@
     4.4  
     4.5  static inline dev_t vbd_map_devnum(u32 cookie)
     4.6  {
     4.7 -    return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
     4.8 +	return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
     4.9  }
    4.10 -#define vbd_sz(_v)   ((_v)->bdev->bd_part ? \
    4.11 -    (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
    4.12 +#define vbd_sz(_v)   ((_v)->bdev->bd_part ?				\
    4.13 +	(_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
    4.14  #define bdev_put(_b) blkdev_put(_b)
    4.15  
    4.16  unsigned long vbd_size(struct vbd *vbd)
    4.17 @@ -35,63 +35,73 @@ unsigned long vbd_secsize(struct vbd *vb
    4.18  int vbd_create(blkif_t *blkif, blkif_vdev_t handle,
    4.19  	       u32 pdevice, int readonly)
    4.20  {
    4.21 -    struct vbd *vbd;
    4.22 +	struct vbd *vbd;
    4.23  
    4.24 -    vbd = &blkif->vbd;
    4.25 -    vbd->handle   = handle; 
    4.26 -    vbd->readonly = readonly;
    4.27 -    vbd->type     = 0;
    4.28 +	vbd = &blkif->vbd;
    4.29 +	vbd->handle   = handle; 
    4.30 +	vbd->readonly = readonly;
    4.31 +	vbd->type     = 0;
    4.32  
    4.33 -    vbd->pdevice  = pdevice;
    4.34 +	vbd->pdevice  = pdevice;
    4.35  
    4.36 -    vbd->bdev = open_by_devnum(
    4.37 -        vbd_map_devnum(vbd->pdevice),
    4.38 -        vbd->readonly ? FMODE_READ : FMODE_WRITE);
    4.39 -    if ( IS_ERR(vbd->bdev) )
    4.40 -    {
    4.41 -        DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
    4.42 -        return -ENOENT;
    4.43 -    }
    4.44 +	vbd->bdev = open_by_devnum(
    4.45 +		vbd_map_devnum(vbd->pdevice),
    4.46 +		vbd->readonly ? FMODE_READ : FMODE_WRITE);
    4.47 +	if (IS_ERR(vbd->bdev)) {
    4.48 +		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
    4.49 +			vbd->pdevice);
    4.50 +		return -ENOENT;
    4.51 +	}
    4.52  
    4.53 -    if ( (vbd->bdev->bd_disk == NULL) )
    4.54 -    {
    4.55 -        DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
    4.56 -	vbd_free(vbd);
    4.57 -        return -ENOENT;
    4.58 -    }
    4.59 +	if (vbd->bdev->bd_disk == NULL) {
    4.60 +		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
    4.61 +			vbd->pdevice);
    4.62 +		vbd_free(vbd);
    4.63 +		return -ENOENT;
    4.64 +	}
    4.65  
    4.66 -    if ( vbd->bdev->bd_disk->flags & GENHD_FL_CD )
    4.67 -        vbd->type |= VDISK_CDROM;
    4.68 -    if ( vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE )
    4.69 -        vbd->type |= VDISK_REMOVABLE;
    4.70 +	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
    4.71 +		vbd->type |= VDISK_CDROM;
    4.72 +	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
    4.73 +		vbd->type |= VDISK_REMOVABLE;
    4.74  
    4.75 -    DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
    4.76 -            handle, blkif->domid);
    4.77 -    return 0;
    4.78 +	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
    4.79 +		handle, blkif->domid);
    4.80 +	return 0;
    4.81  }
    4.82  
    4.83  void vbd_free(struct vbd *vbd)
    4.84  {
    4.85 -    if (vbd->bdev)
    4.86 -	bdev_put(vbd->bdev);
    4.87 -    vbd->bdev = NULL;
    4.88 +	if (vbd->bdev)
    4.89 +		bdev_put(vbd->bdev);
    4.90 +	vbd->bdev = NULL;
    4.91  }
    4.92  
    4.93  int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
    4.94  {
    4.95 -    struct vbd *vbd = &blkif->vbd;
    4.96 -    int rc = -EACCES;
    4.97 +	struct vbd *vbd = &blkif->vbd;
    4.98 +	int rc = -EACCES;
    4.99  
   4.100 -    if ((operation == WRITE) && vbd->readonly)
   4.101 -        goto out;
   4.102 +	if ((operation == WRITE) && vbd->readonly)
   4.103 +		goto out;
   4.104  
   4.105 -    if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
   4.106 -        goto out;
   4.107 +	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
   4.108 +		goto out;
   4.109  
   4.110 -    req->dev  = vbd->pdevice;
   4.111 -    req->bdev = vbd->bdev;
   4.112 -    rc = 0;
   4.113 +	req->dev  = vbd->pdevice;
   4.114 +	req->bdev = vbd->bdev;
   4.115 +	rc = 0;
   4.116  
   4.117   out:
   4.118 -    return rc;
   4.119 +	return rc;
   4.120  }
   4.121 +
   4.122 +/*
   4.123 + * Local variables:
   4.124 + *  c-file-style: "linux"
   4.125 + *  indent-tabs-mode: t
   4.126 + *  c-indent-level: 8
   4.127 + *  c-basic-offset: 8
   4.128 + *  tab-width: 8
   4.129 + * End:
   4.130 + */
     5.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Thu Sep 22 14:01:01 2005 +0100
     5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Thu Sep 22 14:04:14 2005 +0100
     5.3 @@ -124,7 +124,7 @@ static void frontend_changed(struct xenb
     5.4  
     5.5  	return;
     5.6  
     5.7 -abort:
     5.8 + abort:
     5.9  	xenbus_transaction_end(1);
    5.10  }
    5.11  
    5.12 @@ -278,3 +278,13 @@ void blkif_xenbus_init(void)
    5.13  {
    5.14  	xenbus_register_backend(&blkback);
    5.15  }
    5.16 +
    5.17 +/*
    5.18 + * Local variables:
    5.19 + *  c-file-style: "linux"
    5.20 + *  indent-tabs-mode: t
    5.21 + *  c-indent-level: 8
    5.22 + *  c-basic-offset: 8
    5.23 + *  tab-width: 8
    5.24 + * End:
    5.25 + */
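
The one layout change of note in blkback/xenbus.c is the "abort:" label moving in by a single space. Goto labels in this code sit one column in from the left margin, since a label at column 0 can be mistaken for a function name by tools such as "diff -p" that scan column 0 for function context. A sketch of the convention (hypothetical error-unwind function):

	static int setup(void)
	{
		if (step_one() < 0)
			goto fail;
		if (step_two() < 0)
			goto fail;
		return 0;

	 fail:	/* one leading space keeps the label out of column 0 */
		cleanup();
		return -EIO;
	}
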
     6.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Thu Sep 22 14:01:01 2005 +0100
     6.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Thu Sep 22 14:04:14 2005 +0100
     6.3 @@ -146,4 +146,15 @@ extern void do_blkif_request (request_qu
     6.4  int xlvbd_add(blkif_sector_t capacity, int device,
     6.5  	      u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
     6.6  void xlvbd_del(struct blkfront_info *info);
     6.7 +
     6.8  #endif /* __XEN_DRIVERS_BLOCK_H__ */
     6.9 +
    6.10 +/*
    6.11 + * Local variables:
    6.12 + *  c-file-style: "linux"
    6.13 + *  indent-tabs-mode: t
    6.14 + *  c-indent-level: 8
    6.15 + *  c-basic-offset: 8
    6.16 + *  tab-width: 8
    6.17 + * End:
    6.18 + */
     7.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c	Thu Sep 22 14:01:01 2005 +0100
     7.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c	Thu Sep 22 14:04:14 2005 +0100
     7.3 @@ -65,7 +65,7 @@ static struct xlbd_type_info xlbd_vbd_ty
     7.4  };
     7.5  
     7.6  static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
     7.7 -					  NUM_VBD_MAJORS];
     7.8 +					 NUM_VBD_MAJORS];
     7.9  
    7.10  #define XLBD_MAJOR_IDE_START	0
    7.11  #define XLBD_MAJOR_SCSI_START	(NUM_IDE_MAJORS)
    7.12 @@ -309,3 +309,13 @@ xlvbd_del(struct blkfront_info *info)
    7.13  
    7.14  	bdput(bd);
    7.15  }
    7.16 +
    7.17 +/*
    7.18 + * Local variables:
    7.19 + *  c-file-style: "linux"
    7.20 + *  indent-tabs-mode: t
    7.21 + *  c-indent-level: 8
    7.22 + *  c-basic-offset: 8
    7.23 + *  tab-width: 8
    7.24 + * End:
    7.25 + */
     8.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Thu Sep 22 14:01:01 2005 +0100
     8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Thu Sep 22 14:04:14 2005 +0100
     8.3 @@ -4,7 +4,6 @@
     8.4   * This is a modified version of the block backend driver that remaps requests
     8.5   * to a user-space memory region.  It is intended to be used to write 
     8.6   * application-level servers that provide block interfaces to client VMs.
     8.7 - * 
     8.8   */
     8.9  
    8.10  #include <linux/kernel.h>
    8.11 @@ -67,20 +66,19 @@ static int blktap_read_ufe_ring(void);
    8.12  
    8.13  static inline int BLKTAP_MODE_VALID(unsigned long arg)
    8.14  {
    8.15 -    return (
    8.16 -        ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
    8.17 -        ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
    8.18 -        ( arg == BLKTAP_MODE_INTERPOSE    ) );
    8.19 +	return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
    8.20 +		(arg == BLKTAP_MODE_INTERCEPT_FE) ||
    8.21 +		(arg == BLKTAP_MODE_INTERPOSE   ));
    8.22  /*
    8.23 -    return (
    8.24 -        ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
    8.25 -        ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
    8.26 -        ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
    8.27 -        ( arg == BLKTAP_MODE_INTERPOSE    ) ||
    8.28 -        ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
    8.29 -        ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
    8.30 -        ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
    8.31 -        );
    8.32 +  return (
    8.33 +  ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
    8.34 +  ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
    8.35 +  ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
    8.36 +  ( arg == BLKTAP_MODE_INTERPOSE    ) ||
    8.37 +  ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
    8.38 +  ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
    8.39 +  ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
    8.40 +  );
    8.41  */
    8.42  }
    8.43  
    8.44 @@ -110,14 +108,12 @@ unsigned long mmap_vstart;  /* Kernel pa
    8.45  unsigned long rings_vstart; /* start of mmaped vma               */
    8.46  unsigned long user_vstart;  /* start of user mappings            */
    8.47  
    8.48 -#define MMAP_PAGES                                              \
    8.49 -    (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
    8.50 -#define MMAP_VADDR(_start, _req,_seg)                           \
    8.51 -    (_start +                                                   \
    8.52 -     ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +    \
    8.53 -     ((_seg) * PAGE_SIZE))
    8.54 -
    8.55 -
    8.56 +#define MMAP_PAGES						\
    8.57 +	(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
    8.58 +#define MMAP_VADDR(_start, _req,_seg)					\
    8.59 +	(_start +							\
    8.60 +	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
    8.61 +	 ((_seg) * PAGE_SIZE))
    8.62  
    8.63  /*
    8.64   * Each outstanding request that we've passed to the lower device layers has a 
    8.65 @@ -126,12 +122,12 @@ unsigned long user_vstart;  /* start of 
    8.66   * response queued for it, with the saved 'id' passed back.
    8.67   */
    8.68  typedef struct {
    8.69 -    blkif_t       *blkif;
    8.70 -    unsigned long  id;
    8.71 -    int            nr_pages;
    8.72 -    atomic_t       pendcnt;
    8.73 -    unsigned short operation;
    8.74 -    int            status;
    8.75 +	blkif_t       *blkif;
    8.76 +	unsigned long  id;
    8.77 +	int            nr_pages;
    8.78 +	atomic_t       pendcnt;
    8.79 +	unsigned short operation;
    8.80 +	int            status;
    8.81  } pending_req_t;
    8.82  
    8.83  /*
    8.84 @@ -156,17 +152,17 @@ static PEND_RING_IDX pending_prod, pendi
    8.85  
    8.86  static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
    8.87  {
    8.88 -    return ( (fe_dom << 16) | MASK_PEND_IDX(idx) );
    8.89 +	return ((fe_dom << 16) | MASK_PEND_IDX(idx));
    8.90  }
    8.91  
    8.92  extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id) 
    8.93  { 
    8.94 -    return (PEND_RING_IDX)( id & 0x0000ffff );
    8.95 +	return (PEND_RING_IDX)(id & 0x0000ffff);
    8.96  }
    8.97  
    8.98  extern inline domid_t ID_TO_DOM(unsigned long id) 
    8.99  { 
   8.100 -    return (domid_t)(id >> 16); 
   8.101 +	return (domid_t)(id >> 16); 
   8.102  }
   8.103  
   8.104  
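
MAKE_ID(), ID_TO_IDX() and ID_TO_DOM() above implement a simple 16/16 split of the request id: the frontend domain in the high half, the masked pending-ring index in the low half. A round-trip sketch (the concrete values are arbitrary and assume the masked index fits in the low 16 bits):

	unsigned long id  = MAKE_ID(7 /* fe_dom */, 42 /* ring index */);
	domid_t       dom = ID_TO_DOM(id);	/* == 7                 */
	PEND_RING_IDX idx = ID_TO_IDX(id);	/* == MASK_PEND_IDX(42) */
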
   8.105 @@ -181,8 +177,8 @@ extern inline domid_t ID_TO_DOM(unsigned
   8.106   */
   8.107  struct grant_handle_pair
   8.108  {
   8.109 -    u16  kernel;
   8.110 -    u16  user;
   8.111 +	u16  kernel;
   8.112 +	u16  user;
   8.113  };
   8.114  static struct grant_handle_pair pending_grant_handles[MMAP_PAGES];
   8.115  #define pending_handle(_idx, _i) \
   8.116 @@ -199,21 +195,20 @@ static struct grant_handle_pair pending_
   8.117   */
   8.118  
   8.119  static struct page *blktap_nopage(struct vm_area_struct *vma,
   8.120 -                                             unsigned long address,
   8.121 -                                             int *type)
   8.122 +				  unsigned long address,
   8.123 +				  int *type)
   8.124  {
   8.125 -    /*
   8.126 -     * if the page has not been mapped in by the driver then generate
   8.127 -     * a SIGBUS to the domain.
   8.128 -     */
   8.129 +	/*
   8.130 +	 * if the page has not been mapped in by the driver then generate
   8.131 +	 * a SIGBUS to the domain.
   8.132 +	 */
   8.133 +	force_sig(SIGBUS, current);
   8.134  
   8.135 -    force_sig(SIGBUS, current);
   8.136 -
   8.137 -    return 0;
   8.138 +	return 0;
   8.139  }
   8.140  
   8.141  struct vm_operations_struct blktap_vm_ops = {
   8.142 -    nopage:   blktap_nopage,
   8.143 +	nopage:   blktap_nopage,
   8.144  };
   8.145  
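
The table above keeps the old GCC-specific "nopage:" initializer label. The C99 designated-initializer spelling, shown here only for comparison, is equivalent:

	struct vm_operations_struct blktap_vm_ops = {
		.nopage = blktap_nopage,
	};
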
   8.146  /******************************************************************
   8.147 @@ -222,44 +217,45 @@ struct vm_operations_struct blktap_vm_op
   8.148  
   8.149  static int blktap_open(struct inode *inode, struct file *filp)
   8.150  {
   8.151 -    blkif_sring_t *sring;
   8.152 -    
   8.153 -    if ( test_and_set_bit(0, &blktap_dev_inuse) )
   8.154 -        return -EBUSY;
   8.155 +	blkif_sring_t *sring;
   8.156 +
   8.157 +	if (test_and_set_bit(0, &blktap_dev_inuse))
   8.158 +		return -EBUSY;
   8.159      
   8.160 -    /* Allocate the fe ring. */
   8.161 -    sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
   8.162 -    if (sring == NULL)
   8.163 -        goto fail_nomem;
   8.164 +	/* Allocate the fe ring. */
   8.165 +	sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
   8.166 +	if (sring == NULL)
   8.167 +		goto fail_nomem;
   8.168  
   8.169 -    SetPageReserved(virt_to_page(sring));
   8.170 +	SetPageReserved(virt_to_page(sring));
   8.171      
   8.172 -    SHARED_RING_INIT(sring);
   8.173 -    FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
   8.174 +	SHARED_RING_INIT(sring);
   8.175 +	FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
   8.176  
   8.177 -    return 0;
   8.178 +	return 0;
   8.179  
   8.180   fail_nomem:
   8.181 -    return -ENOMEM;
   8.182 +	return -ENOMEM;
   8.183  }
   8.184  
   8.185  static int blktap_release(struct inode *inode, struct file *filp)
   8.186  {
   8.187 -    blktap_dev_inuse = 0;
   8.188 -    blktap_ring_ok = 0;
   8.189 +	blktap_dev_inuse = 0;
   8.190 +	blktap_ring_ok = 0;
   8.191  
   8.192 -    /* Free the ring page. */
   8.193 -    ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
   8.194 -    free_page((unsigned long) blktap_ufe_ring.sring);
   8.195 +	/* Free the ring page. */
   8.196 +	ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
   8.197 +	free_page((unsigned long) blktap_ufe_ring.sring);
   8.198  
   8.199 -    /* Clear any active mappings and free foreign map table */
   8.200 -    if (blktap_vma != NULL) {
   8.201 -        zap_page_range(blktap_vma, blktap_vma->vm_start, 
   8.202 -                       blktap_vma->vm_end - blktap_vma->vm_start, NULL);
   8.203 -        blktap_vma = NULL;
   8.204 -    }
   8.205 +	/* Clear any active mappings and free foreign map table */
   8.206 +	if (blktap_vma != NULL) {
   8.207 +		zap_page_range(
   8.208 +			blktap_vma, blktap_vma->vm_start, 
   8.209 +			blktap_vma->vm_end - blktap_vma->vm_start, NULL);
   8.210 +		blktap_vma = NULL;
   8.211 +	}
   8.212  
   8.213 -    return 0;
   8.214 +	return 0;
   8.215  }
   8.216  
   8.217  
   8.218 @@ -283,128 +279,124 @@ static int blktap_release(struct inode *
   8.219   */
   8.220  static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
   8.221  {
   8.222 -    int size;
   8.223 -    struct page **map;
   8.224 -    int i;
   8.225 +	int size;
   8.226 +	struct page **map;
   8.227 +	int i;
   8.228  
   8.229 -    DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
   8.230 -           vma->vm_start, vma->vm_end);
   8.231 +	DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
   8.232 +		vma->vm_start, vma->vm_end);
   8.233  
   8.234 -    vma->vm_flags |= VM_RESERVED;
   8.235 -    vma->vm_ops = &blktap_vm_ops;
   8.236 +	vma->vm_flags |= VM_RESERVED;
   8.237 +	vma->vm_ops = &blktap_vm_ops;
   8.238  
   8.239 -    size = vma->vm_end - vma->vm_start;
   8.240 -    if ( size != ( (MMAP_PAGES + RING_PAGES) << PAGE_SHIFT ) ) {
   8.241 -        printk(KERN_INFO 
   8.242 -               "blktap: you _must_ map exactly %d pages!\n",
   8.243 -               MMAP_PAGES + RING_PAGES);
   8.244 -        return -EAGAIN;
   8.245 -    }
   8.246 +	size = vma->vm_end - vma->vm_start;
   8.247 +	if (size != ((MMAP_PAGES + RING_PAGES) << PAGE_SHIFT)) {
   8.248 +		printk(KERN_INFO 
   8.249 +		       "blktap: you _must_ map exactly %d pages!\n",
   8.250 +		       MMAP_PAGES + RING_PAGES);
   8.251 +		return -EAGAIN;
   8.252 +	}
   8.253  
   8.254 -    size >>= PAGE_SHIFT;
   8.255 -    DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
   8.256 +	size >>= PAGE_SHIFT;
   8.257 +	DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
   8.258      
   8.259 -    rings_vstart = vma->vm_start;
   8.260 -    user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
   8.261 +	rings_vstart = vma->vm_start;
   8.262 +	user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
   8.263      
   8.264 -    /* Map the ring pages to the start of the region and reserve it. */
   8.265 +	/* Map the ring pages to the start of the region and reserve it. */
   8.266  
   8.267 -    /* not sure if I really need to do this... */
   8.268 -    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
   8.269 +	/* not sure if I really need to do this... */
   8.270 +	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
   8.271  
   8.272 -    if (remap_pfn_range(vma, vma->vm_start, 
   8.273 -                         __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
   8.274 -                         PAGE_SIZE, vma->vm_page_prot)) 
   8.275 -    {
   8.276 -        WPRINTK("Mapping user ring failed!\n");
   8.277 -        goto fail;
   8.278 -    }
   8.279 +	if (remap_pfn_range(vma, vma->vm_start, 
   8.280 +			    __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
   8.281 +			    PAGE_SIZE, vma->vm_page_prot)) {
   8.282 +		WPRINTK("Mapping user ring failed!\n");
   8.283 +		goto fail;
   8.284 +	}
   8.285 +
   8.286 +	/* Mark this VM as containing foreign pages, and set up mappings. */
   8.287 +	map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
   8.288 +		      * sizeof(struct page_struct*),
   8.289 +		      GFP_KERNEL);
   8.290 +	if (map == NULL) {
    8.291 +		WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
   8.292 +		goto fail;
   8.293 +	}
   8.294  
   8.295 -    /* Mark this VM as containing foreign pages, and set up mappings. */
   8.296 -    map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
   8.297 -                  * sizeof(struct page_struct*),
   8.298 -                  GFP_KERNEL);
   8.299 -    if (map == NULL) 
   8.300 -    {
   8.301 -        WPRINTK("Couldn't alloc VM_FOREIGH map.\n");
   8.302 -        goto fail;
   8.303 -    }
   8.304 -
   8.305 -    for (i=0; i<((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
   8.306 -        map[i] = NULL;
   8.307 +	for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
   8.308 +		map[i] = NULL;
   8.309      
   8.310 -    vma->vm_private_data = map;
   8.311 -    vma->vm_flags |= VM_FOREIGN;
   8.312 +	vma->vm_private_data = map;
   8.313 +	vma->vm_flags |= VM_FOREIGN;
   8.314  
   8.315 -    blktap_vma = vma;
   8.316 -    blktap_ring_ok = 1;
   8.317 +	blktap_vma = vma;
   8.318 +	blktap_ring_ok = 1;
   8.319  
   8.320 -    return 0;
   8.321 +	return 0;
   8.322   fail:
   8.323 -    /* Clear any active mappings. */
   8.324 -    zap_page_range(vma, vma->vm_start, 
   8.325 -                   vma->vm_end - vma->vm_start, NULL);
   8.326 +	/* Clear any active mappings. */
   8.327 +	zap_page_range(vma, vma->vm_start, 
   8.328 +		       vma->vm_end - vma->vm_start, NULL);
   8.329  
   8.330 -    return -ENOMEM;
   8.331 +	return -ENOMEM;
   8.332  }
   8.333  
   8.334  static int blktap_ioctl(struct inode *inode, struct file *filp,
   8.335                          unsigned int cmd, unsigned long arg)
   8.336  {
   8.337 -    switch(cmd) {
   8.338 -    case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
   8.339 -        return blktap_read_ufe_ring();
    8.340 +	switch (cmd) {
   8.341 +	case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
   8.342 +		return blktap_read_ufe_ring();
   8.343  
   8.344 -    case BLKTAP_IOCTL_SETMODE:
   8.345 -        if (BLKTAP_MODE_VALID(arg)) {
   8.346 -            blktap_mode = arg;
   8.347 -            /* XXX: may need to flush rings here. */
   8.348 -            printk(KERN_INFO "blktap: set mode to %lx\n", arg);
   8.349 -            return 0;
   8.350 -        }
   8.351 -    case BLKTAP_IOCTL_PRINT_IDXS:
   8.352 +	case BLKTAP_IOCTL_SETMODE:
   8.353 +		if (BLKTAP_MODE_VALID(arg)) {
   8.354 +			blktap_mode = arg;
   8.355 +			/* XXX: may need to flush rings here. */
   8.356 +			printk(KERN_INFO "blktap: set mode to %lx\n", arg);
   8.357 +			return 0;
   8.358 +		}
   8.359 +	case BLKTAP_IOCTL_PRINT_IDXS:
   8.360          {
   8.361 -            //print_fe_ring_idxs();
   8.362 -            WPRINTK("User Rings: \n-----------\n");
   8.363 -            WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d "
   8.364 -                            "| req_prod: %2d, rsp_prod: %2d\n",
   8.365 -                            blktap_ufe_ring.rsp_cons,
   8.366 -                            blktap_ufe_ring.req_prod_pvt,
   8.367 -                            blktap_ufe_ring.sring->req_prod,
   8.368 -                            blktap_ufe_ring.sring->rsp_prod);
   8.369 +		//print_fe_ring_idxs();
   8.370 +		WPRINTK("User Rings: \n-----------\n");
   8.371 +		WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d "
   8.372 +			"| req_prod: %2d, rsp_prod: %2d\n",
   8.373 +			blktap_ufe_ring.rsp_cons,
   8.374 +			blktap_ufe_ring.req_prod_pvt,
   8.375 +			blktap_ufe_ring.sring->req_prod,
   8.376 +			blktap_ufe_ring.sring->rsp_prod);
   8.377              
   8.378          }
   8.379 -    }
   8.380 -    return -ENOIOCTLCMD;
   8.381 +	}
   8.382 +	return -ENOIOCTLCMD;
   8.383  }
   8.384  
   8.385  static unsigned int blktap_poll(struct file *file, poll_table *wait)
   8.386  {
   8.387 -        poll_wait(file, &blktap_wait, wait);
   8.388 -        if ( RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring) ) 
   8.389 -        {
   8.390 -            flush_tlb_all();
   8.391 +	poll_wait(file, &blktap_wait, wait);
   8.392 +	if (RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring)) {
   8.393 +		flush_tlb_all();
   8.394 +		RING_PUSH_REQUESTS(&blktap_ufe_ring);
   8.395 +		return POLLIN | POLLRDNORM;
   8.396 +	}
   8.397  
   8.398 -            RING_PUSH_REQUESTS(&blktap_ufe_ring);
   8.399 -            return POLLIN | POLLRDNORM;
   8.400 -        }
   8.401 -
   8.402 -        return 0;
   8.403 +	return 0;
   8.404  }
   8.405  
   8.406  void blktap_kick_user(void)
   8.407  {
   8.408 -    /* blktap_ring->req_prod = blktap_req_prod; */
   8.409 -    wake_up_interruptible(&blktap_wait);
   8.410 +	/* blktap_ring->req_prod = blktap_req_prod; */
   8.411 +	wake_up_interruptible(&blktap_wait);
   8.412  }
   8.413  
   8.414  static struct file_operations blktap_fops = {
   8.415 -    owner:    THIS_MODULE,
   8.416 -    poll:     blktap_poll,
   8.417 -    ioctl:    blktap_ioctl,
   8.418 -    open:     blktap_open,
   8.419 -    release:  blktap_release,
   8.420 -    mmap:     blktap_mmap,
   8.421 +	owner:    THIS_MODULE,
   8.422 +	poll:     blktap_poll,
   8.423 +	ioctl:    blktap_ioctl,
   8.424 +	open:     blktap_open,
   8.425 +	release:  blktap_release,
   8.426 +	mmap:     blktap_mmap,
   8.427  };
   8.428  
   8.429  
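
blktap_mmap() above requires user space to map exactly RING_PAGES + MMAP_PAGES pages in one region: the shared ring first, then the segment area that MMAP_VADDR() indexes. A sketch of the matching user-space call, assuming the page constants are made visible to the application (tap_fd and the missing error handling are illustrative):

	#include <sys/mman.h>

	size_t len  = (RING_PAGES + MMAP_PAGES) * PAGE_SIZE;
	void  *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_SHARED, tap_fd, 0);
	/* area                          -> shared ring page(s)
	 * area + RING_PAGES*PAGE_SIZE   -> per-request segments, addressed
	 *                                  as MMAP_VADDR(user_vstart, req, seg) */
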
   8.430 @@ -417,44 +409,44 @@ static void make_response(blkif_t *blkif
   8.431  
   8.432  static void fast_flush_area(int idx, int nr_pages)
   8.433  {
   8.434 -    struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
   8.435 -    unsigned int i, op = 0;
   8.436 -    struct grant_handle_pair *handle;
   8.437 -    unsigned long ptep;
   8.438 +	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
   8.439 +	unsigned int i, op = 0;
   8.440 +	struct grant_handle_pair *handle;
   8.441 +	unsigned long ptep;
   8.442  
   8.443 -    for (i=0; i<nr_pages; i++)
   8.444 -    {
   8.445 -        handle = &pending_handle(idx, i);
   8.446 -        if (!BLKTAP_INVALID_HANDLE(handle))
   8.447 -        {
    8.448 +	for (i = 0; i < nr_pages; i++) {
   8.450 +		handle = &pending_handle(idx, i);
   8.451 +		if (BLKTAP_INVALID_HANDLE(handle))
   8.452 +			continue;
   8.453  
   8.454 -            unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
   8.455 -            unmap[op].dev_bus_addr = 0;
   8.456 -            unmap[op].handle = handle->kernel;
   8.457 -            op++;
   8.458 +		unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
   8.459 +		unmap[op].dev_bus_addr = 0;
   8.460 +		unmap[op].handle = handle->kernel;
   8.461 +		op++;
   8.462  
   8.463 -            if (create_lookup_pte_addr(blktap_vma->vm_mm,
   8.464 -                                       MMAP_VADDR(user_vstart, idx, i), 
   8.465 -                                       &ptep) !=0) {
   8.466 -                DPRINTK("Couldn't get a pte addr!\n");
   8.467 -                return;
   8.468 -            }
   8.469 -            unmap[op].host_addr    = ptep;
   8.470 -            unmap[op].dev_bus_addr = 0;
   8.471 -            unmap[op].handle       = handle->user;
   8.472 -            op++;
   8.473 +		if (create_lookup_pte_addr(
   8.474 +			blktap_vma->vm_mm,
   8.475 +			MMAP_VADDR(user_vstart, idx, i), 
   8.476 +			&ptep) !=0) {
   8.477 +			DPRINTK("Couldn't get a pte addr!\n");
   8.478 +			return;
   8.479 +		}
   8.480 +		unmap[op].host_addr    = ptep;
   8.481 +		unmap[op].dev_bus_addr = 0;
   8.482 +		unmap[op].handle       = handle->user;
   8.483 +		op++;
   8.484              
   8.485 -           BLKTAP_INVALIDATE_HANDLE(handle);
   8.486 -        }
   8.487 -    }
   8.488 -    if ( unlikely(HYPERVISOR_grant_table_op(
   8.489 -        GNTTABOP_unmap_grant_ref, unmap, op)))
   8.490 -        BUG();
   8.491 +		BLKTAP_INVALIDATE_HANDLE(handle);
   8.492 +	}
   8.493  
   8.494 -    if (blktap_vma != NULL)
   8.495 -        zap_page_range(blktap_vma, 
   8.496 -                       MMAP_VADDR(user_vstart, idx, 0), 
   8.497 -                       nr_pages << PAGE_SHIFT, NULL);
   8.498 +	BUG_ON(HYPERVISOR_grant_table_op(
   8.499 +		GNTTABOP_unmap_grant_ref, unmap, op));
   8.500 +
   8.501 +	if (blktap_vma != NULL)
   8.502 +		zap_page_range(blktap_vma, 
   8.503 +			       MMAP_VADDR(user_vstart, idx, 0), 
   8.504 +			       nr_pages << PAGE_SHIFT, NULL);
   8.505  }
   8.506  
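
fast_flush_area() above batches every valid handle into a single GNTTABOP_unmap_grant_ref hypercall rather than unmapping page by page. The essential pattern, reduced to the kernel-side handle only (a condensed sketch, not the driver's exact code):

	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, op = 0;

	for (i = 0; i < nr_pages; i++) {
		if (BLKTAP_INVALID_HANDLE(&pending_handle(idx, i)))
			continue;
		unmap[op].host_addr    = MMAP_VADDR(mmap_vstart, idx, i);
		unmap[op].dev_bus_addr = 0;
		unmap[op].handle       = pending_handle(idx, i).kernel;
		op++;
	}
	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, op));
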
   8.507  /******************************************************************
   8.508 @@ -466,34 +458,38 @@ static spinlock_t blkio_schedule_list_lo
   8.509  
   8.510  static int __on_blkdev_list(blkif_t *blkif)
   8.511  {
   8.512 -    return blkif->blkdev_list.next != NULL;
   8.513 +	return blkif->blkdev_list.next != NULL;
   8.514  }
   8.515  
   8.516  static void remove_from_blkdev_list(blkif_t *blkif)
   8.517  {
   8.518 -    unsigned long flags;
   8.519 -    if ( !__on_blkdev_list(blkif) ) return;
   8.520 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.521 -    if ( __on_blkdev_list(blkif) )
   8.522 -    {
   8.523 -        list_del(&blkif->blkdev_list);
   8.524 -        blkif->blkdev_list.next = NULL;
   8.525 -        blkif_put(blkif);
   8.526 -    }
   8.527 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.528 +	unsigned long flags;
   8.529 +
   8.530 +	if (!__on_blkdev_list(blkif))
   8.531 +		return;
   8.532 +
   8.533 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.534 +	if (__on_blkdev_list(blkif)) {
   8.535 +		list_del(&blkif->blkdev_list);
   8.536 +		blkif->blkdev_list.next = NULL;
   8.537 +		blkif_put(blkif);
   8.538 +	}
   8.539 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.540  }
   8.541  
   8.542  static void add_to_blkdev_list_tail(blkif_t *blkif)
   8.543  {
   8.544 -    unsigned long flags;
   8.545 -    if ( __on_blkdev_list(blkif) ) return;
   8.546 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.547 -    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
   8.548 -    {
   8.549 -        list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
   8.550 -        blkif_get(blkif);
   8.551 -    }
   8.552 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.553 +	unsigned long flags;
   8.554 +
   8.555 +	if (__on_blkdev_list(blkif))
   8.556 +		return;
   8.557 +
   8.558 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.559 +	if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
   8.560 +		list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
   8.561 +		blkif_get(blkif);
   8.562 +	}
   8.563 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.564  }
   8.565  
   8.566  
   8.567 @@ -505,51 +501,50 @@ static DECLARE_WAIT_QUEUE_HEAD(blkio_sch
   8.568  
   8.569  static int blkio_schedule(void *arg)
   8.570  {
   8.571 -    DECLARE_WAITQUEUE(wq, current);
   8.572 +	DECLARE_WAITQUEUE(wq, current);
   8.573  
   8.574 -    blkif_t          *blkif;
   8.575 -    struct list_head *ent;
   8.576 +	blkif_t          *blkif;
   8.577 +	struct list_head *ent;
   8.578  
   8.579 -    daemonize("xenblkd");
   8.580 +	daemonize("xenblkd");
   8.581  
   8.582 -    for ( ; ; )
   8.583 -    {
   8.584 -        /* Wait for work to do. */
   8.585 -        add_wait_queue(&blkio_schedule_wait, &wq);
   8.586 -        set_current_state(TASK_INTERRUPTIBLE);
   8.587 -        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
   8.588 -             list_empty(&blkio_schedule_list) )
   8.589 -            schedule();
   8.590 -        __set_current_state(TASK_RUNNING);
   8.591 -        remove_wait_queue(&blkio_schedule_wait, &wq);
   8.592 +	for (;;) {
   8.593 +		/* Wait for work to do. */
   8.594 +		add_wait_queue(&blkio_schedule_wait, &wq);
   8.595 +		set_current_state(TASK_INTERRUPTIBLE);
   8.596 +		if ((NR_PENDING_REQS == MAX_PENDING_REQS) || 
   8.597 +		    list_empty(&blkio_schedule_list))
   8.598 +			schedule();
   8.599 +		__set_current_state(TASK_RUNNING);
   8.600 +		remove_wait_queue(&blkio_schedule_wait, &wq);
   8.601  
   8.602 -        /* Queue up a batch of requests. */
   8.603 -        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
   8.604 -                !list_empty(&blkio_schedule_list) )
   8.605 -        {
   8.606 -            ent = blkio_schedule_list.next;
   8.607 -            blkif = list_entry(ent, blkif_t, blkdev_list);
   8.608 -            blkif_get(blkif);
   8.609 -            remove_from_blkdev_list(blkif);
   8.610 -            if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
   8.611 -                add_to_blkdev_list_tail(blkif);
   8.612 -            blkif_put(blkif);
   8.613 -        }
   8.614 -    }
   8.615 +		/* Queue up a batch of requests. */
   8.616 +		while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
   8.617 +		       !list_empty(&blkio_schedule_list)) {
   8.618 +			ent = blkio_schedule_list.next;
   8.619 +			blkif = list_entry(ent, blkif_t, blkdev_list);
   8.620 +			blkif_get(blkif);
   8.621 +			remove_from_blkdev_list(blkif);
   8.622 +			if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
   8.623 +				add_to_blkdev_list_tail(blkif);
   8.624 +			blkif_put(blkif);
   8.625 +		}
   8.626 +	}
   8.627  }
   8.628  
   8.629  static void maybe_trigger_blkio_schedule(void)
   8.630  {
   8.631 -    /*
   8.632 -     * Needed so that two processes, who together make the following predicate
   8.633 -     * true, don't both read stale values and evaluate the predicate
   8.634 -     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
   8.635 -     */
   8.636 -    smp_mb();
   8.637 +	/*
   8.638 +	 * Needed so that two processes, who together make the following
   8.639 +	 * predicate true, don't both read stale values and evaluate the
   8.640 +	 * predicate incorrectly. Incredibly unlikely to stall the scheduler
   8.641 +	 * on the x86, but...
   8.642 +	 */
   8.643 +	smp_mb();
   8.644  
   8.645 -    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   8.646 -         !list_empty(&blkio_schedule_list) )
   8.647 -        wake_up(&blkio_schedule_wait);
   8.648 +	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   8.649 +	    !list_empty(&blkio_schedule_list))
   8.650 +		wake_up(&blkio_schedule_wait);
   8.651  }
   8.652  
   8.653  
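
The barrier in maybe_trigger_blkio_schedule() above pairs with the prepare-to-wait sequence in blkio_schedule(): the sleeper sets TASK_INTERRUPTIBLE before testing the predicate, and the waker orders its updates before re-testing it, so neither side can read a stale value and lose the wake-up. The idiom, side by side (comments only, not new code):

	/* Sleeper (blkio_schedule)             Waker (maybe_trigger_...)
	 *   add_wait_queue(&wq, ...);            <publish new work>
	 *   set_current_state(INTERRUPTIBLE);    smp_mb();
	 *   if (!work_to_do())                   if (work_to_do())
	 *           schedule();                          wake_up(&wq);
	 */
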
   8.654 @@ -561,54 +556,53 @@ static void maybe_trigger_blkio_schedule
   8.655  
   8.656  static int blktap_read_ufe_ring(void)
   8.657  {
   8.658 -    /* This is called to read responses from the UFE ring. */
   8.659 +	/* This is called to read responses from the UFE ring. */
   8.660  
   8.661 -    RING_IDX i, j, rp;
   8.662 -    blkif_response_t *resp;
   8.663 -    blkif_t *blkif;
   8.664 -    int pending_idx;
   8.665 -    pending_req_t *pending_req;
   8.666 -    unsigned long     flags;
   8.667 +	RING_IDX i, j, rp;
   8.668 +	blkif_response_t *resp;
   8.669 +	blkif_t *blkif;
   8.670 +	int pending_idx;
   8.671 +	pending_req_t *pending_req;
   8.672 +	unsigned long     flags;
   8.673  
   8.674 -    /* if we are forwarding from UFERring to FERing */
   8.675 -    if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
   8.676 +	/* if we are forwarding from UFERring to FERing */
    8.677 +	/* If we are forwarding from the UFE ring to the FE ring. */
   8.678  
   8.679 -        /* for each outstanding message on the UFEring  */
   8.680 -        rp = blktap_ufe_ring.sring->rsp_prod;
   8.681 -        rmb();
    8.682 +		/* For each outstanding message on the UFE ring. */
   8.683 +		rp = blktap_ufe_ring.sring->rsp_prod;
   8.684 +		rmb();
   8.685          
   8.686 -        for ( i = blktap_ufe_ring.rsp_cons; i != rp; i++ )
   8.687 -        {
   8.688 -            resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
   8.689 -            pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
   8.690 -            pending_req = &pending_reqs[pending_idx];
   8.691 +		for (i = blktap_ufe_ring.rsp_cons; i != rp; i++) {
   8.692 +			resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
   8.693 +			pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
   8.694 +			pending_req = &pending_reqs[pending_idx];
   8.695              
   8.696 -            blkif = pending_req->blkif;
   8.697 -            for (j = 0; j < pending_req->nr_pages; j++) {
   8.698 -                unsigned long vaddr;
   8.699 -                struct page **map = blktap_vma->vm_private_data;
   8.700 -                int offset; 
   8.701 +			blkif = pending_req->blkif;
   8.702 +			for (j = 0; j < pending_req->nr_pages; j++) {
   8.703 +				unsigned long vaddr;
   8.704 +				struct page **map = blktap_vma->vm_private_data;
   8.705 +				int offset; 
   8.706  
   8.707 -                vaddr  = MMAP_VADDR(user_vstart, pending_idx, j);
   8.708 -                offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
   8.709 +				vaddr  = MMAP_VADDR(user_vstart, pending_idx, j);
   8.710 +				offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
   8.711  
   8.712 -                //ClearPageReserved(virt_to_page(vaddr));
   8.713 -                ClearPageReserved((struct page *)map[offset]);
   8.714 -                map[offset] = NULL;
   8.715 -            }
   8.716 +				//ClearPageReserved(virt_to_page(vaddr));
   8.717 +				ClearPageReserved((struct page *)map[offset]);
   8.718 +				map[offset] = NULL;
   8.719 +			}
   8.720  
   8.721 -            fast_flush_area(pending_idx, pending_req->nr_pages);
   8.722 -            make_response(blkif, pending_req->id, resp->operation, 
   8.723 -                          resp->status);
   8.724 -            blkif_put(pending_req->blkif);
   8.725 -            spin_lock_irqsave(&pend_prod_lock, flags);
   8.726 -            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   8.727 -            spin_unlock_irqrestore(&pend_prod_lock, flags);
   8.728 -        }
   8.729 -        blktap_ufe_ring.rsp_cons = i;
   8.730 -        maybe_trigger_blkio_schedule();
   8.731 -    }
   8.732 -    return 0;
   8.733 +			fast_flush_area(pending_idx, pending_req->nr_pages);
   8.734 +			make_response(blkif, pending_req->id, resp->operation, 
   8.735 +				      resp->status);
   8.736 +			blkif_put(pending_req->blkif);
   8.737 +			spin_lock_irqsave(&pend_prod_lock, flags);
   8.738 +			pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   8.739 +			spin_unlock_irqrestore(&pend_prod_lock, flags);
   8.740 +		}
   8.741 +		blktap_ufe_ring.rsp_cons = i;
   8.742 +		maybe_trigger_blkio_schedule();
   8.743 +	}
   8.744 +	return 0;
   8.745  }
   8.746  
   8.747  
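
blktap_read_ufe_ring() above is a standard shared-ring consumer: snapshot rsp_prod, issue a read barrier, walk rsp_cons up to the snapshot, then publish the new consumer index. Stripped to its skeleton (consume() is a stand-in for the per-response work):

	rp = blktap_ufe_ring.sring->rsp_prod;
	rmb();		/* see the responses before reading their contents */
	for (i = blktap_ufe_ring.rsp_cons; i != rp; i++)
		consume(RING_GET_RESPONSE(&blktap_ufe_ring, i));
	blktap_ufe_ring.rsp_cons = i;
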
   8.748 @@ -618,10 +612,10 @@ static int blktap_read_ufe_ring(void)
   8.749  
   8.750  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
   8.751  {
   8.752 -    blkif_t *blkif = dev_id;
   8.753 -    add_to_blkdev_list_tail(blkif);
   8.754 -    maybe_trigger_blkio_schedule();
   8.755 -    return IRQ_HANDLED;
   8.756 +	blkif_t *blkif = dev_id;
   8.757 +	add_to_blkdev_list_tail(blkif);
   8.758 +	maybe_trigger_blkio_schedule();
   8.759 +	return IRQ_HANDLED;
   8.760  }
   8.761  
   8.762  
   8.763 @@ -632,199 +626,194 @@ irqreturn_t blkif_be_int(int irq, void *
   8.764  
   8.765  static int do_block_io_op(blkif_t *blkif, int max_to_do)
   8.766  {
   8.767 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   8.768 -    blkif_request_t *req;
   8.769 -    RING_IDX i, rp;
   8.770 -    int more_to_do = 0;
   8.771 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   8.772 +	blkif_request_t *req;
   8.773 +	RING_IDX i, rp;
   8.774 +	int more_to_do = 0;
   8.775      
   8.776 -    rp = blk_ring->sring->req_prod;
   8.777 -    rmb(); /* Ensure we see queued requests up to 'rp'. */
   8.778 +	rp = blk_ring->sring->req_prod;
   8.779 +	rmb(); /* Ensure we see queued requests up to 'rp'. */
   8.780  
   8.781 -    for ( i = blk_ring->req_cons; 
   8.782 -         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
   8.783 -          i++ )
   8.784 -    {
   8.785 -        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
   8.786 -        {
   8.787 -            more_to_do = 1;
   8.788 -            break;
   8.789 -        }
   8.790 +	for (i = blk_ring->req_cons; 
   8.791 +	     (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
    8.792 +	     i++) {
   8.793 +		if ((max_to_do-- == 0) ||
   8.794 +		    (NR_PENDING_REQS == MAX_PENDING_REQS)) {
   8.795 +			more_to_do = 1;
   8.796 +			break;
   8.797 +		}
   8.798          
   8.799 -        req = RING_GET_REQUEST(blk_ring, i);
   8.800 -        switch ( req->operation )
   8.801 -        {
   8.802 -        case BLKIF_OP_READ:
   8.803 -        case BLKIF_OP_WRITE:
   8.804 -            dispatch_rw_block_io(blkif, req);
   8.805 -            break;
   8.806 +		req = RING_GET_REQUEST(blk_ring, i);
   8.807 +		switch (req->operation) {
   8.808 +		case BLKIF_OP_READ:
   8.809 +		case BLKIF_OP_WRITE:
   8.810 +			dispatch_rw_block_io(blkif, req);
   8.811 +			break;
   8.812  
   8.813 -        default:
   8.814 -            DPRINTK("error: unknown block io operation [%d]\n",
   8.815 -                    req->operation);
   8.816 -            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   8.817 -            break;
   8.818 -        }
   8.819 -    }
   8.820 +		default:
   8.821 +			DPRINTK("error: unknown block io operation [%d]\n",
   8.822 +				req->operation);
   8.823 +			make_response(blkif, req->id, req->operation,
   8.824 +				      BLKIF_RSP_ERROR);
   8.825 +			break;
   8.826 +		}
   8.827 +	}
   8.828  
   8.829 -    blk_ring->req_cons = i;
   8.830 -    blktap_kick_user();
   8.831 +	blk_ring->req_cons = i;
   8.832 +	blktap_kick_user();
   8.833  
   8.834 -    return more_to_do;
   8.835 +	return more_to_do;
   8.836  }
   8.837  
   8.838  static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
   8.839  {
   8.840 -    blkif_request_t *target;
   8.841 -    int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   8.842 -    pending_req_t *pending_req;
   8.843 -    struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
   8.844 -    int op, ret;
   8.845 -    unsigned int nseg;
   8.846 +	blkif_request_t *target;
   8.847 +	int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   8.848 +	pending_req_t *pending_req;
   8.849 +	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
   8.850 +	int op, ret;
   8.851 +	unsigned int nseg;
   8.852  
   8.853 -    /* Check that number of segments is sane. */
   8.854 -    nseg = req->nr_segments;
   8.855 -    if ( unlikely(nseg == 0) || 
   8.856 -         unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
   8.857 -    {
   8.858 -        DPRINTK("Bad number of segments in request (%d)\n", nseg);
   8.859 -        goto bad_descriptor;
   8.860 -    }
   8.861 +	/* Check that number of segments is sane. */
   8.862 +	nseg = req->nr_segments;
   8.863 +	if (unlikely(nseg == 0) || 
   8.864 +	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
   8.865 +		DPRINTK("Bad number of segments in request (%d)\n", nseg);
   8.866 +		goto bad_descriptor;
   8.867 +	}
   8.868  
   8.869 -    /* Make sure userspace is ready. */
   8.870 -    if (!blktap_ring_ok) {
   8.871 -        DPRINTK("blktap: ring not ready for requests!\n");
   8.872 -        goto bad_descriptor;
   8.873 -    }
   8.874 +	/* Make sure userspace is ready. */
   8.875 +	if (!blktap_ring_ok) {
   8.876 +		DPRINTK("blktap: ring not ready for requests!\n");
   8.877 +		goto bad_descriptor;
   8.878 +	}
   8.879      
   8.880  
   8.881 -    if ( RING_FULL(&blktap_ufe_ring) ) {
   8.882 -        WPRINTK("blktap: fe_ring is full, can't add (very broken!).\n");
   8.883 -        goto bad_descriptor;
   8.884 -    }
   8.885 -
   8.886 -    flush_cache_all(); /* a noop on intel... */
   8.887 +	if (RING_FULL(&blktap_ufe_ring)) {
   8.888 +		WPRINTK("blktap: fe_ring is full, can't add "
   8.889 +			"(very broken!).\n");
   8.890 +		goto bad_descriptor;
   8.891 +	}
   8.892  
   8.893 -    /* Map the foreign pages directly in to the application */    
   8.894 -    op = 0;
   8.895 -    for (i=0; i<req->nr_segments; i++) {
   8.896 -
   8.897 -        unsigned long uvaddr;
   8.898 -        unsigned long kvaddr;
   8.899 -        unsigned long ptep;
    8.900 +	flush_cache_all(); /* a no-op on Intel... */
   8.901  
   8.902 -        uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
   8.903 -        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
   8.904 +	/* Map the foreign pages directly in to the application */    
   8.905 +	op = 0;
   8.906 +	for (i = 0; i < req->nr_segments; i++) {
   8.907  
   8.908 -        /* Map the remote page to kernel. */
   8.909 -        map[op].host_addr = kvaddr;
   8.910 -        map[op].dom   = blkif->domid;
   8.911 -        map[op].ref   = blkif_gref_from_fas(req->frame_and_sects[i]);
   8.912 -        map[op].flags = GNTMAP_host_map;
   8.913 -        /* This needs a bit more thought in terms of interposition: 
   8.914 -         * If we want to be able to modify pages during write using 
   8.915 -         * grant table mappings, the guest will either need to allow 
   8.916 -         * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
   8.917 -        if (req->operation == BLKIF_OP_WRITE)
   8.918 -            map[op].flags |= GNTMAP_readonly;
   8.919 -        op++;
   8.920 +		unsigned long uvaddr;
   8.921 +		unsigned long kvaddr;
   8.922 +		unsigned long ptep;
   8.923 +
   8.924 +		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
   8.925 +		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
   8.926  
   8.927 -        /* Now map it to user. */
   8.928 -        ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
   8.929 -        if (ret)
   8.930 -        {
   8.931 -            DPRINTK("Couldn't get a pte addr!\n");
   8.932 -            fast_flush_area(pending_idx, req->nr_segments);
   8.933 -            goto bad_descriptor;
   8.934 -        }
   8.935 +		/* Map the remote page to kernel. */
   8.936 +		map[op].host_addr = kvaddr;
   8.937 +		map[op].dom   = blkif->domid;
   8.938 +		map[op].ref   = blkif_gref_from_fas(req->frame_and_sects[i]);
   8.939 +		map[op].flags = GNTMAP_host_map;
   8.940 +		/* This needs a bit more thought in terms of interposition: 
   8.941 +		 * If we want to be able to modify pages during write using 
   8.942 +		 * grant table mappings, the guest will either need to allow 
   8.943 +		 * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
   8.944 +		if (req->operation == BLKIF_OP_WRITE)
   8.945 +			map[op].flags |= GNTMAP_readonly;
   8.946 +		op++;
   8.947 +
   8.948 +		/* Now map it to user. */
   8.949 +		ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
   8.950 +		if (ret) {
   8.951 +			DPRINTK("Couldn't get a pte addr!\n");
   8.952 +			fast_flush_area(pending_idx, req->nr_segments);
   8.953 +			goto bad_descriptor;
   8.954 +		}
   8.955  
   8.956 -        map[op].host_addr = ptep;
   8.957 -        map[op].dom       = blkif->domid;
   8.958 -        map[op].ref       = blkif_gref_from_fas(req->frame_and_sects[i]);
   8.959 -        map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
   8.960 -                            | GNTMAP_contains_pte;
   8.961 -        /* Above interposition comment applies here as well. */
   8.962 -        if (req->operation == BLKIF_OP_WRITE)
   8.963 -            map[op].flags |= GNTMAP_readonly;
   8.964 -        op++;
   8.965 -    }
   8.966 +		map[op].host_addr = ptep;
   8.967 +		map[op].dom       = blkif->domid;
   8.968 +		map[op].ref       = blkif_gref_from_fas(req->frame_and_sects[i]);
   8.969 +		map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
   8.970 +			| GNTMAP_contains_pte;
   8.971 +		/* Above interposition comment applies here as well. */
   8.972 +		if (req->operation == BLKIF_OP_WRITE)
   8.973 +			map[op].flags |= GNTMAP_readonly;
   8.974 +		op++;
   8.975 +	}
   8.976  
   8.977 -    if ( unlikely(HYPERVISOR_grant_table_op(
   8.978 -            GNTTABOP_map_grant_ref, map, op)))
   8.979 -        BUG();
   8.980 +	BUG_ON(HYPERVISOR_grant_table_op(
   8.981 +		GNTTABOP_map_grant_ref, map, op));
   8.982  
   8.983 -    op = 0;
   8.984 -    for (i=0; i<(req->nr_segments*2); i+=2) {
   8.985 -        unsigned long uvaddr;
   8.986 -        unsigned long kvaddr;
   8.987 -        unsigned long offset;
   8.988 -        int cancel = 0;
   8.989 +	op = 0;
   8.990 +	for (i = 0; i < (req->nr_segments*2); i += 2) {
   8.991 +		unsigned long uvaddr;
   8.992 +		unsigned long kvaddr;
   8.993 +		unsigned long offset;
   8.994 +		int cancel = 0;
   8.995  
   8.996 -        uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
   8.997 -        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
   8.998 +		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
   8.999 +		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
  8.1000  
  8.1001 -        if ( unlikely(map[i].handle < 0) ) 
  8.1002 -        {
  8.1003 -            DPRINTK("Error on kernel grant mapping (%d)\n", map[i].handle);
  8.1004 -            ret = map[i].handle;
  8.1005 -            cancel = 1;
  8.1006 -        }
  8.1007 +		if (unlikely(map[i].handle < 0)) {
  8.1008 +			DPRINTK("Error on kernel grant mapping (%d)\n",
  8.1009 +				map[i].handle);
  8.1010 +			ret = map[i].handle;
  8.1011 +			cancel = 1;
  8.1012 +		}
  8.1013  
  8.1014 -        if ( unlikely(map[i+1].handle < 0) ) 
  8.1015 -        {
  8.1016 -            DPRINTK("Error on user grant mapping (%d)\n", map[i+1].handle);
  8.1017 -            ret = map[i+1].handle;
  8.1018 -            cancel = 1;
  8.1019 -        }
  8.1020 +		if (unlikely(map[i+1].handle < 0)) {
  8.1021 +			DPRINTK("Error on user grant mapping (%d)\n",
  8.1022 +				map[i+1].handle);
  8.1023 +			ret = map[i+1].handle;
  8.1024 +			cancel = 1;
  8.1025 +		}
  8.1026  
  8.1027 -        if (cancel) 
  8.1028 -        {
  8.1029 -            fast_flush_area(pending_idx, req->nr_segments);
  8.1030 -            goto bad_descriptor;
  8.1031 -        }
  8.1032 +		if (cancel) {
  8.1033 +			fast_flush_area(pending_idx, req->nr_segments);
  8.1034 +			goto bad_descriptor;
  8.1035 +		}
  8.1036  
  8.1037 -        /* Set the necessary mappings in p2m and in the VM_FOREIGN 
  8.1038 -         * vm_area_struct to allow user vaddr -> struct page lookups
  8.1039 -         * to work.  This is needed for direct IO to foreign pages. */
  8.1040 -        phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
  8.1041 -            FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
  8.1042 +		/* Set the necessary mappings in p2m and in the VM_FOREIGN 
  8.1043 +		 * vm_area_struct to allow user vaddr -> struct page lookups
  8.1044 +		 * to work.  This is needed for direct IO to foreign pages. */
  8.1045 +		phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
  8.1046 +			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
  8.1047  
  8.1048 -        offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
  8.1049 -        ((struct page **)blktap_vma->vm_private_data)[offset] =
  8.1050 -            pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
  8.1051 +		offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
  8.1052 +		((struct page **)blktap_vma->vm_private_data)[offset] =
  8.1053 +			pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
  8.1054  
  8.1055 -        /* Save handles for unmapping later. */
  8.1056 -        pending_handle(pending_idx, i/2).kernel = map[i].handle;
  8.1057 -        pending_handle(pending_idx, i/2).user   = map[i+1].handle;
  8.1058 -    }
  8.1059 +		/* Save handles for unmapping later. */
  8.1060 +		pending_handle(pending_idx, i/2).kernel = map[i].handle;
  8.1061 +		pending_handle(pending_idx, i/2).user   = map[i+1].handle;
  8.1062 +	}
  8.1063  
  8.1064 -    /* Mark mapped pages as reserved: */
  8.1065 -    for ( i = 0; i < req->nr_segments; i++ )
  8.1066 -    {
  8.1067 -        unsigned long kvaddr;
  8.1068 +	/* Mark mapped pages as reserved: */
  8.1069 +	for (i = 0; i < req->nr_segments; i++) {
  8.1070 +		unsigned long kvaddr;
  8.1071 +		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
  8.1072 +		SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
  8.1073 +	}
  8.1074  
  8.1075 -        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
  8.1076 -        SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
  8.1077 -    }
  8.1078 +	pending_req = &pending_reqs[pending_idx];
  8.1079 +	pending_req->blkif     = blkif;
  8.1080 +	pending_req->id        = req->id;
  8.1081 +	pending_req->operation = req->operation;
  8.1082 +	pending_req->status    = BLKIF_RSP_OKAY;
  8.1083 +	pending_req->nr_pages  = nseg;
  8.1084 +	req->id = MAKE_ID(blkif->domid, pending_idx);
  8.1085 +	//atomic_set(&pending_req->pendcnt, nbio);
  8.1086 +	pending_cons++;
  8.1087 +	blkif_get(blkif);
  8.1088  
  8.1089 -    pending_req = &pending_reqs[pending_idx];
  8.1090 -    pending_req->blkif     = blkif;
  8.1091 -    pending_req->id        = req->id;
  8.1092 -    pending_req->operation = req->operation;
  8.1093 -    pending_req->status    = BLKIF_RSP_OKAY;
  8.1094 -    pending_req->nr_pages  = nseg;
  8.1095 -    req->id = MAKE_ID(blkif->domid, pending_idx);
  8.1096 -    //atomic_set(&pending_req->pendcnt, nbio);
  8.1097 -    pending_cons++;
  8.1098 -    blkif_get(blkif);
  8.1099 -
  8.1100 -    /* Finally, write the request message to the user ring. */
  8.1101 -    target = RING_GET_REQUEST(&blktap_ufe_ring, blktap_ufe_ring.req_prod_pvt);
  8.1102 -    memcpy(target, req, sizeof(*req));
  8.1103 -    blktap_ufe_ring.req_prod_pvt++;
  8.1104 -    return;
  8.1105 +	/* Finally, write the request message to the user ring. */
  8.1106 +	target = RING_GET_REQUEST(&blktap_ufe_ring,
  8.1107 +				  blktap_ufe_ring.req_prod_pvt);
  8.1108 +	memcpy(target, req, sizeof(*req));
  8.1109 +	blktap_ufe_ring.req_prod_pvt++;
  8.1110 +	return;
  8.1111  
  8.1112   bad_descriptor:
  8.1113 -    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
  8.1114 +	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
  8.1115  } 
  8.1116  
  8.1117  
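
dispatch_rw_block_io() above builds two grant-map operations per segment, so map[] holds pairs, the kernel mapping at even indices and the user pte update at odd ones, and both handles must be saved for the eventual unmap. Schematically (a sketch of the layout, not replacement code):

	/* map[2*i]   : kernel mapping   (host_addr = kvaddr, GNTMAP_host_map)
	 * map[2*i+1] : user pte update  (host_addr = pte, GNTMAP_contains_pte)
	 * After the hypercall, per segment i: */
	for (i = 0; i < nseg; i++) {
		pending_handle(pending_idx, i).kernel = map[2*i].handle;
		pending_handle(pending_idx, i).user   = map[2*i + 1].handle;
	}
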
  8.1118 @@ -837,80 +826,89 @@ static void dispatch_rw_block_io(blkif_t
  8.1119  static void make_response(blkif_t *blkif, unsigned long id, 
  8.1120                            unsigned short op, int st)
  8.1121  {
  8.1122 -    blkif_response_t *resp;
  8.1123 -    unsigned long     flags;
  8.1124 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
  8.1125 +	blkif_response_t *resp;
  8.1126 +	unsigned long     flags;
  8.1127 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
  8.1128  
  8.1129 -    /* Place on the response ring for the relevant domain. */ 
  8.1130 -    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
  8.1131 -    resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
  8.1132 -    resp->id        = id;
  8.1133 -    resp->operation = op;
  8.1134 -    resp->status    = st;
  8.1135 -    wmb(); /* Ensure other side can see the response fields. */
  8.1136 -    blk_ring->rsp_prod_pvt++;
  8.1137 -    RING_PUSH_RESPONSES(blk_ring);
  8.1138 -    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
  8.1139 +	/* Place on the response ring for the relevant domain. */ 
  8.1140 +	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
  8.1141 +	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
  8.1142 +	resp->id        = id;
  8.1143 +	resp->operation = op;
  8.1144 +	resp->status    = st;
  8.1145 +	wmb(); /* Ensure other side can see the response fields. */
  8.1146 +	blk_ring->rsp_prod_pvt++;
  8.1147 +	RING_PUSH_RESPONSES(blk_ring);
  8.1148 +	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
  8.1149  
  8.1150 -    /* Kick the relevant domain. */
  8.1151 -    notify_via_evtchn(blkif->evtchn);
  8.1152 +	/* Kick the relevant domain. */
  8.1153 +	notify_via_evtchn(blkif->evtchn);
  8.1154  }
  8.1155  
  8.1156  static struct miscdevice blktap_miscdev = {
  8.1157 -    .minor        = BLKTAP_MINOR,
  8.1158 -    .name         = "blktap",
  8.1159 -    .fops         = &blktap_fops,
  8.1160 -    .devfs_name   = "misc/blktap",
  8.1161 +	.minor        = BLKTAP_MINOR,
  8.1162 +	.name         = "blktap",
  8.1163 +	.fops         = &blktap_fops,
  8.1164 +	.devfs_name   = "misc/blktap",
  8.1165  };
  8.1166  
  8.1167  void blkif_deschedule(blkif_t *blkif)
  8.1168  {
  8.1169 -    remove_from_blkdev_list(blkif);
  8.1170 +	remove_from_blkdev_list(blkif);
  8.1171  }
  8.1172  
  8.1173  static int __init blkif_init(void)
  8.1174  {
  8.1175 -    int i, j, err;
  8.1176 -    struct page *page;
  8.1177 +	int i, j, err;
  8.1178 +	struct page *page;
  8.1179  /*
  8.1180 -    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
  8.1181 -         !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
  8.1182 -        return 0;
   8.1183 +  if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
   8.1184 +       !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
   8.1185 +          return 0;
  8.1186  */
  8.1187 -    blkif_interface_init();
  8.1188 +	blkif_interface_init();
  8.1189  
  8.1190 -    page = balloon_alloc_empty_page_range(MMAP_PAGES);
  8.1191 -    BUG_ON(page == NULL);
  8.1192 -    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
  8.1193 +	page = balloon_alloc_empty_page_range(MMAP_PAGES);
  8.1194 +	BUG_ON(page == NULL);
  8.1195 +	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
  8.1196  
  8.1197 -    pending_cons = 0;
  8.1198 -    pending_prod = MAX_PENDING_REQS;
  8.1199 -    memset(pending_reqs, 0, sizeof(pending_reqs));
  8.1200 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
  8.1201 -        pending_ring[i] = i;
  8.1202 +	pending_cons = 0;
  8.1203 +	pending_prod = MAX_PENDING_REQS;
  8.1204 +	memset(pending_reqs, 0, sizeof(pending_reqs));
   8.1205 +	for (i = 0; i < MAX_PENDING_REQS; i++)
  8.1206 +		pending_ring[i] = i;
  8.1207      
  8.1208 -    spin_lock_init(&blkio_schedule_list_lock);
  8.1209 -    INIT_LIST_HEAD(&blkio_schedule_list);
  8.1210 +	spin_lock_init(&blkio_schedule_list_lock);
  8.1211 +	INIT_LIST_HEAD(&blkio_schedule_list);
  8.1212 +
  8.1213 +	BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
  8.1214  
  8.1215 -    if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
  8.1216 -        BUG();
  8.1217 +	blkif_xenbus_init();
  8.1218  
  8.1219 -    blkif_xenbus_init();
   8.1220 +	for (i = 0; i < MAX_PENDING_REQS; i++)
  8.1221 +		for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
  8.1222 +			BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
  8.1223  
  8.1224 -    for (i=0; i<MAX_PENDING_REQS ; i++)
  8.1225 -        for (j=0; j<BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
  8.1226 -            BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
  8.1227 +	err = misc_register(&blktap_miscdev);
  8.1228 +	if (err != 0) {
  8.1229 +		printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n",
  8.1230 +		       err);
  8.1231 +		return err;
  8.1232 +	}
  8.1233  
  8.1234 -    err = misc_register(&blktap_miscdev);
  8.1235 -    if ( err != 0 )
  8.1236 -    {
  8.1237 -        printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n", err);
  8.1238 -        return err;
  8.1239 -    }
  8.1240 +	init_waitqueue_head(&blktap_wait);
  8.1241  
  8.1242 -    init_waitqueue_head(&blktap_wait);
  8.1243 -
  8.1244 -    return 0;
  8.1245 +	return 0;
  8.1246  }
  8.1247  
  8.1248  __initcall(blkif_init);
  8.1249 +
  8.1250 +/*
  8.1251 + * Local variables:
  8.1252 + *  c-file-style: "linux"
  8.1253 + *  indent-tabs-mode: t
  8.1254 + *  c-indent-level: 8
  8.1255 + *  c-basic-offset: 8
  8.1256 + *  tab-width: 8
  8.1257 + * End:
  8.1258 + */
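
make_response() in this file is the producer-side mirror of the consumer loop shown earlier: fill in the response, write-barrier, advance the private producer index, push, then kick the event channel. The skeleton, with the ring lock elided (a sketch):

	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
	resp->id        = id;
	resp->operation = op;
	resp->status    = st;
	wmb();		/* publish the fields before the index */
	blk_ring->rsp_prod_pvt++;
	RING_PUSH_RESPONSES(blk_ring);
	notify_via_evtchn(blkif->evtchn);
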
     9.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/common.h	Thu Sep 22 14:01:01 2005 +0100
     9.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/common.h	Thu Sep 22 14:04:14 2005 +0100
     9.3 @@ -33,39 +33,39 @@
     9.4  #define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
     9.5  
     9.6  struct vbd {
     9.7 -    blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
     9.8 -    unsigned char  readonly;    /* Non-zero -> read-only */
     9.9 -    unsigned char  type;        /* VDISK_xxx */
    9.10 -    u32            pdevice;     /* phys device that this vbd maps to */
    9.11 -    struct block_device *bdev;
    9.12 +	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
    9.13 +	unsigned char  readonly;    /* Non-zero -> read-only */
    9.14 +	unsigned char  type;        /* VDISK_xxx */
    9.15 +	u32            pdevice;     /* phys device that this vbd maps to */
    9.16 +	struct block_device *bdev;
    9.17  }; 
    9.18  
    9.19  typedef struct blkif_st {
    9.20 -    /* Unique identifier for this interface. */
    9.21 -    domid_t           domid;
    9.22 -    unsigned int      handle;
    9.23 -    /* Physical parameters of the comms window. */
    9.24 -    unsigned int      evtchn;
    9.25 -    unsigned int      remote_evtchn;
    9.26 -    /* Comms information. */
    9.27 -    blkif_back_ring_t blk_ring;
    9.28 -    struct vm_struct *blk_ring_area;
    9.29 -    /* VBDs attached to this interface. */
    9.30 -    struct vbd        vbd;
    9.31 -    /* Private fields. */
    9.32 -    enum { DISCONNECTED, CONNECTED } status;
    9.33 +	/* Unique identifier for this interface. */
    9.34 +	domid_t           domid;
    9.35 +	unsigned int      handle;
    9.36 +	/* Physical parameters of the comms window. */
    9.37 +	unsigned int      evtchn;
    9.38 +	unsigned int      remote_evtchn;
    9.39 +	/* Comms information. */
    9.40 +	blkif_back_ring_t blk_ring;
    9.41 +	struct vm_struct *blk_ring_area;
    9.42 +	/* VBDs attached to this interface. */
    9.43 +	struct vbd        vbd;
    9.44 +	/* Private fields. */
    9.45 +	enum { DISCONNECTED, CONNECTED } status;
    9.46  #ifdef CONFIG_XEN_BLKDEV_TAP_BE
    9.47 -    /* Is this a blktap frontend */
    9.48 -    unsigned int     is_blktap;
    9.49 +	/* Is this a blktap frontend */
    9.50 +	unsigned int     is_blktap;
    9.51  #endif
    9.52 -    struct list_head blkdev_list;
    9.53 -    spinlock_t       blk_ring_lock;
    9.54 -    atomic_t         refcnt;
    9.55 +	struct list_head blkdev_list;
    9.56 +	spinlock_t       blk_ring_lock;
    9.57 +	atomic_t         refcnt;
    9.58  
    9.59 -    struct work_struct free_work;
    9.60 +	struct work_struct free_work;
    9.61  
    9.62 -    u16              shmem_handle;
    9.63 -    grant_ref_t      shmem_ref;
    9.64 +	u16              shmem_handle;
    9.65 +	grant_ref_t      shmem_ref;
    9.66  } blkif_t;
    9.67  
    9.68  blkif_t *alloc_blkif(domid_t domid);
    9.69 @@ -89,10 +89,10 @@ unsigned int vbd_info(struct vbd *vbd);
    9.70  unsigned long vbd_secsize(struct vbd *vbd);
    9.71  
    9.72  struct phys_req {
    9.73 -    unsigned short       dev;
    9.74 -    unsigned short       nr_sects;
    9.75 -    struct block_device *bdev;
    9.76 -    blkif_sector_t       sector_number;
    9.77 +	unsigned short       dev;
    9.78 +	unsigned short       nr_sects;
    9.79 +	struct block_device *bdev;
    9.80 +	blkif_sector_t       sector_number;
    9.81  };
    9.82  
    9.83  int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
    9.84 @@ -106,3 +106,13 @@ void blkif_xenbus_init(void);
    9.85  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
    9.86  
    9.87  #endif /* __BLKIF__BACKEND__COMMON_H__ */
    9.88 +
    9.89 +/*
    9.90 + * Local variables:
    9.91 + *  c-file-style: "linux"
    9.92 + *  indent-tabs-mode: t
    9.93 + *  c-indent-level: 8
    9.94 + *  c-basic-offset: 8
    9.95 + *  tab-width: 8
    9.96 + * End:
    9.97 + */
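
The refcnt field above is manipulated through blkif_get()/blkif_put(); as blktap.c shows, membership of blkio_schedule_list always holds one reference, so a blkif cannot be freed (via free_work) while still queued:

	/* add_to_blkdev_list_tail():   list_add_tail(...); blkif_get(blkif);
	 * remove_from_blkdev_list():   list_del(...);      blkif_put(blkif); */
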
    10.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c	Thu Sep 22 14:01:01 2005 +0100
    10.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c	Thu Sep 22 14:04:14 2005 +0100
    10.3 @@ -222,3 +222,13 @@ void blkif_xenbus_init(void)
    10.4  {
    10.5  	xenbus_register_backend(&blkback);
    10.6  }
    10.7 +
    10.8 +/*
    10.9 + * Local variables:
   10.10 + *  c-file-style: "linux"
   10.11 + *  indent-tabs-mode: t
   10.12 + *  c-indent-level: 8
   10.13 + *  c-basic-offset: 8
   10.14 + *  tab-width: 8
   10.15 + * End:
   10.16 + */
    11.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/console.c	Thu Sep 22 14:01:01 2005 +0100
    11.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c	Thu Sep 22 14:04:14 2005 +0100
    11.3 @@ -75,31 +75,33 @@ extern int sysrq_enabled;
    11.4  
    11.5  static int __init xencons_setup(char *str)
    11.6  {
    11.7 -    char *q;
    11.8 -    int n;
    11.9 +	char *q;
   11.10 +	int n;
   11.11  
   11.12 -    if ( !strncmp(str, "ttyS", 4) )
   11.13 -        xc_mode = XC_SERIAL;
   11.14 -    else if ( !strncmp(str, "tty", 3) )
   11.15 -        xc_mode = XC_TTY;
   11.16 -    else if ( !strncmp(str, "off", 3) )
   11.17 -        xc_mode = XC_OFF;
   11.18 +	if (!strncmp(str, "ttyS", 4))
   11.19 +		xc_mode = XC_SERIAL;
   11.20 +	else if (!strncmp(str, "tty", 3))
   11.21 +		xc_mode = XC_TTY;
   11.22 +	else if (!strncmp(str, "off", 3))
   11.23 +		xc_mode = XC_OFF;
   11.24  
   11.25 -    switch ( xc_mode )
   11.26 -    {
   11.27 -    case XC_SERIAL:
   11.28 -        n = simple_strtol( str+4, &q, 10 );
   11.29 -        if ( q > (str + 4) ) xc_num = n;
   11.30 -        break;
   11.31 -    case XC_TTY:
   11.32 -        n = simple_strtol( str+3, &q, 10 );
   11.33 -        if ( q > (str + 3) ) xc_num = n;
   11.34 -        break;
   11.35 -    default:
   11.36 -        break;
   11.37 -    }
    11.38 +	switch (xc_mode) {
   11.40 +	case XC_SERIAL:
   11.41 +		n = simple_strtol(str+4, &q, 10);
   11.42 +		if (q > (str + 4))
   11.43 +			xc_num = n;
   11.44 +		break;
   11.45 +	case XC_TTY:
   11.46 +		n = simple_strtol(str+3, &q, 10);
   11.47 +		if (q > (str + 3))
   11.48 +			xc_num = n;
   11.49 +		break;
   11.50 +	default:
   11.51 +		break;
   11.52 +	}
   11.53  
   11.54 -    return 1;
   11.55 +	return 1;
   11.56  }
   11.57  __setup("xencons=", xencons_setup);
   11.58  
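
xencons_setup() above recognises "off", "tty[N]" and "ttyS[N]", testing the longer "ttyS" prefix first so it is not swallowed by the "tty" match. The resulting settings (illustrative):

	/* xencons=off    ->  xc_mode = XC_OFF
	 * xencons=tty2   ->  xc_mode = XC_TTY,    xc_num = 2
	 * xencons=ttyS0  ->  xc_mode = XC_SERIAL, xc_num = 0
	 * xencons=ttyS   ->  xc_mode = XC_SERIAL, xc_num unchanged */
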
   11.59 @@ -111,11 +113,11 @@ static unsigned int wc, wp; /* write_con
   11.60  
   11.61  static int __init xencons_bufsz_setup(char *str)
   11.62  {
   11.63 -    unsigned int goal;
   11.64 -    goal = simple_strtoul(str, NULL, 0);
   11.65 -    while ( wbuf_size < goal )
   11.66 -        wbuf_size <<= 1;
   11.67 -    return 1;
   11.68 +	unsigned int goal;
   11.69 +	goal = simple_strtoul(str, NULL, 0);
   11.70 +	while (wbuf_size < goal)
   11.71 +		wbuf_size <<= 1;
   11.72 +	return 1;
   11.73  }
   11.74  __setup("xencons_bufsz=", xencons_bufsz_setup);
   11.75  
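
xencons_bufsz_setup() above rounds the requested size up to a power of two (assuming the compiled-in default is itself a power of two), which lets the driver's WBUF_MASK() reduce the free-running wc/wp indices with a single AND, presumably along the lines of:

	#define WBUF_MASK(_i)	((_i) & (wbuf_size - 1))

	wbuf[WBUF_MASK(wp++)] = c;	/* producer, kcons_write()       */
	c = wbuf[WBUF_MASK(wc++)];	/* consumer, elsewhere in file   */
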
   11.76 @@ -135,57 +137,55 @@ static struct tty_driver xencons_driver;
   11.77  /******************** Kernel console driver ********************************/
   11.78  
   11.79  static void kcons_write(
   11.80 -    struct console *c, const char *s, unsigned int count)
   11.81 +	struct console *c, const char *s, unsigned int count)
   11.82  {
   11.83 -    int           i;
   11.84 -    unsigned long flags;
   11.85 +	int           i;
   11.86 +	unsigned long flags;
   11.87  
   11.88 -    spin_lock_irqsave(&xencons_lock, flags);
   11.89 +	spin_lock_irqsave(&xencons_lock, flags);
   11.90      
   11.91 -    for ( i = 0; i < count; i++ )
   11.92 -    {
   11.93 -        if ( (wp - wc) >= (wbuf_size - 1) )
   11.94 -            break;
   11.95 -        if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' )
   11.96 -            wbuf[WBUF_MASK(wp++)] = '\r';
   11.97 -    }
   11.98 +	for (i = 0; i < count; i++) {
   11.99 +		if ((wp - wc) >= (wbuf_size - 1))
  11.100 +			break;
  11.101 +		if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
  11.102 +			wbuf[WBUF_MASK(wp++)] = '\r';
  11.103 +	}
  11.104  
  11.105 -    __xencons_tx_flush();
  11.106 +	__xencons_tx_flush();
  11.107  
  11.108 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.109 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.110  }
  11.111  
  11.112  static void kcons_write_dom0(
  11.113 -    struct console *c, const char *s, unsigned int count)
  11.114 +	struct console *c, const char *s, unsigned int count)
  11.115  {
  11.116 -    int rc;
  11.117 +	int rc;
  11.118  
  11.119 -    while ( (count > 0) &&
  11.120 -            ((rc = HYPERVISOR_console_io(
  11.121 -                CONSOLEIO_write, count, (char *)s)) > 0) )
  11.122 -    {
  11.123 -        count -= rc;
  11.124 -        s += rc;
  11.125 -    }
  11.126 +	while ((count > 0) &&
  11.127 +	       ((rc = HYPERVISOR_console_io(
  11.128 +			CONSOLEIO_write, count, (char *)s)) > 0)) {
  11.129 +		count -= rc;
  11.130 +		s += rc;
  11.131 +	}
  11.132  }
  11.133  
  11.134  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.135  static struct tty_driver *kcons_device(struct console *c, int *index)
  11.136  {
  11.137 -    *index = c->index;
  11.138 -    return xencons_driver;
  11.139 +	*index = c->index;
  11.140 +	return xencons_driver;
  11.141  }
  11.142  #else
  11.143  static kdev_t kcons_device(struct console *c)
  11.144  {
  11.145 -    return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
  11.146 +	return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
  11.147  }
  11.148  #endif
  11.149  
  11.150  static struct console kcons_info = {
  11.151 -    .device	= kcons_device,
  11.152 -    .flags	= CON_PRINTBUFFER,
  11.153 -    .index	= -1,
  11.154 +	.device	= kcons_device,
  11.155 +	.flags	= CON_PRINTBUFFER,
  11.156 +	.index	= -1,
  11.157  };
  11.158  
  11.159  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.160 @@ -196,44 +196,42 @@ static int __init xen_console_init(void)
  11.161  void xen_console_init(void)
  11.162  #endif
  11.163  {
  11.164 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
  11.165 -    {
  11.166 -        if ( xc_mode == XC_DEFAULT )
  11.167 -            xc_mode = XC_SERIAL;
  11.168 -        kcons_info.write = kcons_write_dom0;
  11.169 +	if (xen_start_info->flags & SIF_INITDOMAIN) {
  11.170 +		if (xc_mode == XC_DEFAULT)
  11.171 +			xc_mode = XC_SERIAL;
  11.172 +		kcons_info.write = kcons_write_dom0;
  11.173  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.174 -        if ( xc_mode == XC_SERIAL )
  11.175 -            kcons_info.flags |= CON_ENABLED;
  11.176 +		if (xc_mode == XC_SERIAL)
  11.177 +			kcons_info.flags |= CON_ENABLED;
  11.178  #endif
  11.179 -    }
  11.180 -    else
  11.181 -    {
  11.182 -        if ( xc_mode == XC_DEFAULT )
  11.183 -            xc_mode = XC_TTY;
  11.184 -        kcons_info.write = kcons_write;
  11.185 -    }
  11.186 +	} else {
  11.187 +		if (xc_mode == XC_DEFAULT)
  11.188 +			xc_mode = XC_TTY;
  11.189 +		kcons_info.write = kcons_write;
  11.190 +	}
  11.191  
  11.192 -    switch ( xc_mode )
  11.193 -    {
  11.194 -    case XC_SERIAL:
  11.195 -        strcpy(kcons_info.name, "ttyS");
  11.196 -        if ( xc_num == -1 ) xc_num = 0;
  11.197 -        break;
  11.198 +	switch (xc_mode) {
  11.199 +	case XC_SERIAL:
  11.200 +		strcpy(kcons_info.name, "ttyS");
  11.201 +		if (xc_num == -1)
  11.202 +			xc_num = 0;
  11.203 +		break;
  11.204  
  11.205 -    case XC_TTY:
  11.206 -        strcpy(kcons_info.name, "tty");
  11.207 -        if ( xc_num == -1 ) xc_num = 1;
  11.208 -        break;
  11.209 +	case XC_TTY:
  11.210 +		strcpy(kcons_info.name, "tty");
  11.211 +		if (xc_num == -1)
  11.212 +			xc_num = 1;
  11.213 +		break;
  11.214  
  11.215 -    default:
  11.216 -        return __RETCODE;
  11.217 -    }
  11.218 +	default:
  11.219 +		return __RETCODE;
  11.220 +	}
  11.221  
  11.222 -    wbuf = alloc_bootmem(wbuf_size);
  11.223 +	wbuf = alloc_bootmem(wbuf_size);
  11.224  
  11.225 -    register_console(&kcons_info);
  11.226 +	register_console(&kcons_info);
  11.227  
  11.228 -    return __RETCODE;
  11.229 +	return __RETCODE;
  11.230  }
  11.231  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.232  console_initcall(xen_console_init);
  11.233 @@ -246,41 +244,40 @@ asmlinkage int xprintk(const char *fmt, 
  11.234  asmlinkage int xprintk(const char *fmt, ...)
  11.235  #endif
  11.236  {
  11.237 -    va_list args;
  11.238 -    int printk_len;
  11.239 -    static char printk_buf[1024];
  11.240 +	va_list args;
  11.241 +	int printk_len;
  11.242 +	static char printk_buf[1024];
  11.243      
  11.244 -    /* Emit the output into the temporary buffer */
  11.245 -    va_start(args, fmt);
  11.246 -    printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
  11.247 -    va_end(args);
  11.248 +	/* Emit the output into the temporary buffer */
  11.249 +	va_start(args, fmt);
  11.250 +	printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
  11.251 +	va_end(args);
  11.252  
  11.253 -    /* Send the processed output directly to Xen. */
  11.254 -    kcons_write_dom0(NULL, printk_buf, printk_len);
  11.255 +	/* Send the processed output directly to Xen. */
  11.256 +	kcons_write_dom0(NULL, printk_buf, printk_len);
  11.257  
  11.258 -    return 0;
  11.259 +	return 0;
  11.260  }
  11.261  
  11.262  /*** Forcibly flush console data before dying. ***/
  11.263  void xencons_force_flush(void)
  11.264  {
  11.265 -    int        sz;
  11.266 +	int sz;
  11.267  
  11.268 -    /* Emergency console is synchronous, so there's nothing to flush. */
  11.269 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
  11.270 -        return;
  11.271 +	/* Emergency console is synchronous, so there's nothing to flush. */
  11.272 +	if (xen_start_info->flags & SIF_INITDOMAIN)
  11.273 +		return;
  11.274  
  11.275  
  11.276 -    /* Spin until console data is flushed through to the domain controller. */
  11.277 -    while ( (wc != wp) )
  11.278 -    {
  11.279 -	int sent = 0;
  11.280 -        if ( (sz = wp - wc) == 0 )
  11.281 -            continue;
  11.282 -	sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  11.283 -	if (sent > 0)
  11.284 -	    wc += sent;
  11.285 -    }
  11.286 +	/* Spin until console data is flushed through to the daemon. */
  11.287 +	while (wc != wp) {
  11.288 +		int sent = 0;
  11.289 +		if ((sz = wp - wc) == 0)
  11.290 +			continue;
  11.291 +		sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  11.292 +		if (sent > 0)
  11.293 +			wc += sent;
  11.294 +	}
  11.295  }
  11.296  
  11.297  
  11.298 @@ -305,362 +302,358 @@ static char x_char;
  11.299  /* Non-privileged receive callback. */
  11.300  static void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
  11.301  {
  11.302 -    int           i;
  11.303 -    unsigned long flags;
  11.304 +	int           i;
  11.305 +	unsigned long flags;
  11.306  
  11.307 -    spin_lock_irqsave(&xencons_lock, flags);
  11.308 -    if ( xencons_tty != NULL )
  11.309 -    {
  11.310 -        for ( i = 0; i < len; i++ ) {
  11.311 +	spin_lock_irqsave(&xencons_lock, flags);
  11.312 +	if (xencons_tty == NULL)
  11.313 +		goto out;
  11.314 +
  11.315 +	for (i = 0; i < len; i++) {
  11.316  #ifdef CONFIG_MAGIC_SYSRQ
  11.317 -            if (sysrq_enabled) {
  11.318 -                if (buf[i] == '\x0f') { /* ^O */
  11.319 -                    sysrq_requested = jiffies;
  11.320 -                    continue; /* don't print the sysrq key */
  11.321 -                } else if (sysrq_requested) {
  11.322 -                    unsigned long sysrq_timeout = sysrq_requested + HZ*2;
  11.323 -                    sysrq_requested = 0;
  11.324 -                    /* if it's been less than a timeout, do the sysrq */
  11.325 -                    if (time_before(jiffies, sysrq_timeout)) {
  11.326 -                        spin_unlock_irqrestore(&xencons_lock, flags);
  11.327 -                        handle_sysrq(buf[i], regs, xencons_tty);
  11.328 -                        spin_lock_irqsave(&xencons_lock, flags);
  11.329 -                        continue;
  11.330 -                    }
  11.331 -                }
  11.332 -            }
  11.333 +		if (sysrq_enabled) {
  11.334 +			if (buf[i] == '\x0f') { /* ^O */
  11.335 +				sysrq_requested = jiffies;
  11.336 +				continue; /* don't print the sysrq key */
  11.337 +			} else if (sysrq_requested) {
  11.338 +				unsigned long sysrq_timeout =
  11.339 +					sysrq_requested + HZ*2;
  11.340 +				sysrq_requested = 0;
  11.341 +				if (time_before(jiffies, sysrq_timeout)) {
  11.342 +					spin_unlock_irqrestore(
  11.343 +						&xencons_lock, flags);
  11.344 +					handle_sysrq(
  11.345 +						buf[i], regs, xencons_tty);
  11.346 +					spin_lock_irqsave(
  11.347 +						&xencons_lock, flags);
  11.348 +					continue;
  11.349 +				}
  11.350 +			}
  11.351 +		}
  11.352  #endif
  11.353 -            tty_insert_flip_char(xencons_tty, buf[i], 0);
  11.354 -        }
  11.355 -        tty_flip_buffer_push(xencons_tty);
  11.356 -    }
  11.357 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.358 +		tty_insert_flip_char(xencons_tty, buf[i], 0);
  11.359 +	}
  11.360 +	tty_flip_buffer_push(xencons_tty);
  11.361  
  11.362 + out:
  11.363 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.364  }
  11.365  
  11.366  /* Privileged and non-privileged transmit worker. */
  11.367  static void __xencons_tx_flush(void)
  11.368  {
  11.369 -    int        sz, work_done = 0;
  11.370 +	int sz, work_done = 0;
  11.371  
  11.372 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
  11.373 -    {
  11.374 -        if ( x_char )
  11.375 -        {
  11.376 -            kcons_write_dom0(NULL, &x_char, 1);
  11.377 -            x_char = 0;
  11.378 -            work_done = 1;
  11.379 -        }
  11.380 +	if (xen_start_info->flags & SIF_INITDOMAIN) {
  11.381 +		if (x_char) {
  11.382 +			kcons_write_dom0(NULL, &x_char, 1);
  11.383 +			x_char = 0;
  11.384 +			work_done = 1;
  11.385 +		}
  11.386  
  11.387 -        while ( wc != wp )
  11.388 -        {
  11.389 -            sz = wp - wc;
  11.390 -            if ( sz > (wbuf_size - WBUF_MASK(wc)) )
  11.391 -                sz = wbuf_size - WBUF_MASK(wc);
  11.392 -            kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
  11.393 -            wc += sz;
  11.394 -            work_done = 1;
  11.395 -        }
  11.396 -    }
  11.397 -    else
  11.398 -    {
  11.399 -        while ( x_char )
  11.400 -        {
  11.401 -	    if (xencons_ring_send(&x_char, 1) == 1) {
  11.402 -		x_char = 0;
  11.403 -		work_done = 1;
  11.404 -	    }
  11.405 -        }
  11.406 +		while (wc != wp) {
  11.407 +			sz = wp - wc;
  11.408 +			if (sz > (wbuf_size - WBUF_MASK(wc)))
  11.409 +				sz = wbuf_size - WBUF_MASK(wc);
  11.410 +			kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
  11.411 +			wc += sz;
  11.412 +			work_done = 1;
  11.413 +		}
  11.414 +	} else {
  11.415 +		while (x_char) {
  11.416 +			if (xencons_ring_send(&x_char, 1) == 1) {
  11.417 +				x_char = 0;
  11.418 +				work_done = 1;
  11.419 +			}
  11.420 +		}
  11.421  
  11.422 -        while ( wc != wp )
  11.423 -        {
  11.424 -	    int sent;
  11.425 -            sz = wp - wc;
  11.426 -	    if ( sz > (wbuf_size - WBUF_MASK(wc)) )
  11.427 -		sz = wbuf_size - WBUF_MASK(wc);
  11.428 -	    sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  11.429 -	    if ( sent > 0 ) {
  11.430 -		wc += sent;
  11.431 -		work_done = 1;
  11.432 -	    }
  11.433 -        }
  11.434 -    }
  11.435 +		while (wc != wp) {
  11.436 +			int sent;
  11.437 +			sz = wp - wc;
  11.438 +			if (sz > (wbuf_size - WBUF_MASK(wc)))
  11.439 +				sz = wbuf_size - WBUF_MASK(wc);
  11.440 +			sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  11.441 +			if (sent > 0) {
  11.442 +				wc += sent;
  11.443 +				work_done = 1;
  11.444 +			}
  11.445 +		}
  11.446 +	}
  11.447  
  11.448 -    if ( work_done && (xencons_tty != NULL) )
  11.449 -    {
  11.450 -        wake_up_interruptible(&xencons_tty->write_wait);
  11.451 -        if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
  11.452 -             (xencons_tty->ldisc.write_wakeup != NULL) )
  11.453 -            (xencons_tty->ldisc.write_wakeup)(xencons_tty);
  11.454 -    }
  11.455 +	if (work_done && (xencons_tty != NULL))
  11.456 +	{
  11.457 +		wake_up_interruptible(&xencons_tty->write_wait);
  11.458 +		if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
  11.459 +		    (xencons_tty->ldisc.write_wakeup != NULL))
  11.460 +			(xencons_tty->ldisc.write_wakeup)(xencons_tty);
  11.461 +	}
  11.462  }
  11.463  
  11.464  /* Privileged receive callback and transmit kicker. */
  11.465  static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
  11.466                                            struct pt_regs *regs)
  11.467  {
  11.468 -    static char   rbuf[16];
  11.469 -    int           i, l;
  11.470 -    unsigned long flags;
  11.471 +	static char   rbuf[16];
  11.472 +	int           i, l;
  11.473 +	unsigned long flags;
  11.474  
  11.475 -    spin_lock_irqsave(&xencons_lock, flags);
  11.476 +	spin_lock_irqsave(&xencons_lock, flags);
  11.477  
  11.478 -    if ( xencons_tty != NULL )
  11.479 -    {
  11.480 -        /* Receive work. */
  11.481 -        while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
  11.482 -            for ( i = 0; i < l; i++ )
  11.483 -                tty_insert_flip_char(xencons_tty, rbuf[i], 0);
  11.484 -        if ( xencons_tty->flip.count != 0 )
  11.485 -            tty_flip_buffer_push(xencons_tty);
  11.486 -    }
  11.487 +	if (xencons_tty != NULL)
  11.488 +	{
  11.489 +		/* Receive work. */
  11.490 +		while ((l = HYPERVISOR_console_io(
  11.491 +			CONSOLEIO_read, 16, rbuf)) > 0)
  11.492 +			for (i = 0; i < l; i++)
  11.493 +				tty_insert_flip_char(xencons_tty, rbuf[i], 0);
  11.494 +		if (xencons_tty->flip.count != 0)
  11.495 +			tty_flip_buffer_push(xencons_tty);
  11.496 +	}
  11.497  
  11.498 -    /* Transmit work. */
  11.499 -    __xencons_tx_flush();
  11.500 +	/* Transmit work. */
  11.501 +	__xencons_tx_flush();
  11.502  
  11.503 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.504 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.505  
  11.506 -    return IRQ_HANDLED;
  11.507 +	return IRQ_HANDLED;
  11.508  }
  11.509  
  11.510  static int xencons_write_room(struct tty_struct *tty)
  11.511  {
  11.512 -    return wbuf_size - (wp - wc);
  11.513 +	return wbuf_size - (wp - wc);
  11.514  }
  11.515  
  11.516  static int xencons_chars_in_buffer(struct tty_struct *tty)
  11.517  {
  11.518 -    return wp - wc;
  11.519 +	return wp - wc;
  11.520  }
  11.521  
  11.522  static void xencons_send_xchar(struct tty_struct *tty, char ch)
  11.523  {
  11.524 -    unsigned long flags;
  11.525 +	unsigned long flags;
  11.526  
  11.527 -    if ( TTY_INDEX(tty) != 0 )
  11.528 -        return;
  11.529 +	if (TTY_INDEX(tty) != 0)
  11.530 +		return;
  11.531  
  11.532 -    spin_lock_irqsave(&xencons_lock, flags);
  11.533 -    x_char = ch;
  11.534 -    __xencons_tx_flush();
  11.535 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.536 +	spin_lock_irqsave(&xencons_lock, flags);
  11.537 +	x_char = ch;
  11.538 +	__xencons_tx_flush();
  11.539 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.540  }
  11.541  
  11.542  static void xencons_throttle(struct tty_struct *tty)
  11.543  {
  11.544 -    if ( TTY_INDEX(tty) != 0 )
  11.545 -        return;
  11.546 +	if (TTY_INDEX(tty) != 0)
  11.547 +		return;
  11.548  
  11.549 -    if ( I_IXOFF(tty) )
  11.550 -        xencons_send_xchar(tty, STOP_CHAR(tty));
  11.551 +	if (I_IXOFF(tty))
  11.552 +		xencons_send_xchar(tty, STOP_CHAR(tty));
  11.553  }
  11.554  
  11.555  static void xencons_unthrottle(struct tty_struct *tty)
  11.556  {
  11.557 -    if ( TTY_INDEX(tty) != 0 )
  11.558 -        return;
  11.559 +	if (TTY_INDEX(tty) != 0)
  11.560 +		return;
  11.561  
  11.562 -    if ( I_IXOFF(tty) )
  11.563 -    {
  11.564 -        if ( x_char != 0 )
  11.565 -            x_char = 0;
  11.566 -        else
  11.567 -            xencons_send_xchar(tty, START_CHAR(tty));
  11.568 -    }
  11.569 +	if (I_IXOFF(tty)) {
  11.570 +		if (x_char != 0)
  11.571 +			x_char = 0;
  11.572 +		else
  11.573 +			xencons_send_xchar(tty, START_CHAR(tty));
  11.574 +	}
  11.575  }
  11.576  
  11.577  static void xencons_flush_buffer(struct tty_struct *tty)
  11.578  {
  11.579 -    unsigned long flags;
  11.580 +	unsigned long flags;
  11.581  
  11.582 -    if ( TTY_INDEX(tty) != 0 )
  11.583 -        return;
  11.584 +	if (TTY_INDEX(tty) != 0)
  11.585 +		return;
  11.586  
  11.587 -    spin_lock_irqsave(&xencons_lock, flags);
  11.588 -    wc = wp = 0;
  11.589 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.590 +	spin_lock_irqsave(&xencons_lock, flags);
  11.591 +	wc = wp = 0;
  11.592 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.593  }
  11.594  
  11.595  static inline int __xencons_put_char(int ch)
  11.596  {
  11.597 -    char _ch = (char)ch;
  11.598 -    if ( (wp - wc) == wbuf_size )
  11.599 -        return 0;
  11.600 -    wbuf[WBUF_MASK(wp++)] = _ch;
  11.601 -    return 1;
  11.602 +	char _ch = (char)ch;
  11.603 +	if ((wp - wc) == wbuf_size)
  11.604 +		return 0;
  11.605 +	wbuf[WBUF_MASK(wp++)] = _ch;
  11.606 +	return 1;
  11.607  }
  11.608  
  11.609  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.610  static int xencons_write(
  11.611 -    struct tty_struct *tty,
  11.612 -    const unsigned char *buf,
  11.613 -    int count)
  11.614 +	struct tty_struct *tty,
  11.615 +	const unsigned char *buf,
  11.616 +	int count)
  11.617  {
  11.618 -    int i;
  11.619 -    unsigned long flags;
  11.620 +	int i;
  11.621 +	unsigned long flags;
  11.622  
  11.623 -    if ( TTY_INDEX(tty) != 0 )
  11.624 -        return count;
  11.625 +	if (TTY_INDEX(tty) != 0)
  11.626 +		return count;
  11.627  
  11.628 -    spin_lock_irqsave(&xencons_lock, flags);
  11.629 +	spin_lock_irqsave(&xencons_lock, flags);
  11.630  
  11.631 -    for ( i = 0; i < count; i++ )
  11.632 -        if ( !__xencons_put_char(buf[i]) )
  11.633 -            break;
  11.634 +	for (i = 0; i < count; i++)
  11.635 +		if (!__xencons_put_char(buf[i]))
  11.636 +			break;
  11.637  
  11.638 -    if ( i != 0 )
  11.639 -        __xencons_tx_flush();
  11.640 +	if (i != 0)
  11.641 +		__xencons_tx_flush();
  11.642  
  11.643 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.644 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.645  
  11.646 -    return i;
  11.647 +	return i;
  11.648  }
  11.649  #else
  11.650  static int xencons_write(
  11.651 -    struct tty_struct *tty, 
  11.652 -    int from_user,
  11.653 -    const u_char *buf, 
  11.654 -    int count)
   11.655 +	struct tty_struct *tty,
  11.656 +	int from_user,
   11.657 +	const u_char *buf,
  11.658 +	int count)
  11.659  {
  11.660 -    int i;
  11.661 -    unsigned long flags;
  11.662 +	int i;
  11.663 +	unsigned long flags;
  11.664  
  11.665 -    if ( from_user && verify_area(VERIFY_READ, buf, count) )
  11.666 -        return -EINVAL;
  11.667 +	if (from_user && verify_area(VERIFY_READ, buf, count))
  11.668 +		return -EINVAL;
  11.669  
  11.670 -    if ( TTY_INDEX(tty) != 0 )
  11.671 -        return count;
  11.672 +	if (TTY_INDEX(tty) != 0)
  11.673 +		return count;
  11.674  
  11.675 -    spin_lock_irqsave(&xencons_lock, flags);
  11.676 +	spin_lock_irqsave(&xencons_lock, flags);
  11.677  
  11.678 -    for ( i = 0; i < count; i++ )
  11.679 -    {
  11.680 -        char ch;
  11.681 -        if ( from_user )
  11.682 -            __get_user(ch, buf + i);
  11.683 -        else
  11.684 -            ch = buf[i];
  11.685 -        if ( !__xencons_put_char(ch) )
  11.686 -            break;
  11.687 -    }
  11.688 +	for (i = 0; i < count; i++) {
  11.689 +		char ch;
  11.690 +		if (from_user)
  11.691 +			__get_user(ch, buf + i);
  11.692 +		else
  11.693 +			ch = buf[i];
  11.694 +		if (!__xencons_put_char(ch))
  11.695 +			break;
  11.696 +	}
  11.697  
  11.698 -    if ( i != 0 )
  11.699 -        __xencons_tx_flush();
  11.700 +	if (i != 0)
  11.701 +		__xencons_tx_flush();
  11.702  
  11.703 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.704 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.705  
  11.706 -    return i;
  11.707 +	return i;
  11.708  }
  11.709  #endif
  11.710  
  11.711  static void xencons_put_char(struct tty_struct *tty, u_char ch)
  11.712  {
  11.713 -    unsigned long flags;
  11.714 +	unsigned long flags;
  11.715  
  11.716 -    if ( TTY_INDEX(tty) != 0 )
  11.717 -        return;
  11.718 +	if (TTY_INDEX(tty) != 0)
  11.719 +		return;
  11.720  
  11.721 -    spin_lock_irqsave(&xencons_lock, flags);
  11.722 -    (void)__xencons_put_char(ch);
  11.723 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.724 +	spin_lock_irqsave(&xencons_lock, flags);
  11.725 +	(void)__xencons_put_char(ch);
  11.726 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.727  }
  11.728  
  11.729  static void xencons_flush_chars(struct tty_struct *tty)
  11.730  {
  11.731 -    unsigned long flags;
  11.732 +	unsigned long flags;
  11.733  
  11.734 -    if ( TTY_INDEX(tty) != 0 )
  11.735 -        return;
  11.736 +	if (TTY_INDEX(tty) != 0)
  11.737 +		return;
  11.738  
  11.739 -    spin_lock_irqsave(&xencons_lock, flags);
  11.740 -    __xencons_tx_flush();
  11.741 -    spin_unlock_irqrestore(&xencons_lock, flags);    
  11.742 +	spin_lock_irqsave(&xencons_lock, flags);
  11.743 +	__xencons_tx_flush();
   11.744 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.745  }
  11.746  
  11.747  static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
  11.748  {
  11.749 -    unsigned long orig_jiffies = jiffies;
  11.750 +	unsigned long orig_jiffies = jiffies;
  11.751  
  11.752 -    if ( TTY_INDEX(tty) != 0 )
  11.753 -        return;
  11.754 +	if (TTY_INDEX(tty) != 0)
  11.755 +		return;
  11.756  
  11.757 -    while ( DRV(tty->driver)->chars_in_buffer(tty) )
  11.758 -    {
  11.759 -        set_current_state(TASK_INTERRUPTIBLE);
  11.760 -        schedule_timeout(1);
  11.761 -        if ( signal_pending(current) )
  11.762 -            break;
  11.763 -        if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
  11.764 -            break;
  11.765 -    }
  11.766 +	while (DRV(tty->driver)->chars_in_buffer(tty))
  11.767 +	{
  11.768 +		set_current_state(TASK_INTERRUPTIBLE);
  11.769 +		schedule_timeout(1);
  11.770 +		if (signal_pending(current))
  11.771 +			break;
   11.772 +		if ((timeout != 0) &&
   11.773 +		    time_after(jiffies, orig_jiffies + timeout))
  11.774 +			break;
  11.775 +	}
  11.776      
  11.777 -    set_current_state(TASK_RUNNING);
  11.778 +	set_current_state(TASK_RUNNING);
  11.779  }
  11.780  
  11.781  static int xencons_open(struct tty_struct *tty, struct file *filp)
  11.782  {
  11.783 -    unsigned long flags;
  11.784 +	unsigned long flags;
  11.785  
  11.786 -    if ( TTY_INDEX(tty) != 0 )
  11.787 -        return 0;
  11.788 +	if (TTY_INDEX(tty) != 0)
  11.789 +		return 0;
  11.790  
  11.791 -    spin_lock_irqsave(&xencons_lock, flags);
  11.792 -    tty->driver_data = NULL;
  11.793 -    if ( xencons_tty == NULL )
  11.794 -        xencons_tty = tty;
  11.795 -    __xencons_tx_flush();
  11.796 -    spin_unlock_irqrestore(&xencons_lock, flags);    
  11.797 +	spin_lock_irqsave(&xencons_lock, flags);
  11.798 +	tty->driver_data = NULL;
  11.799 +	if (xencons_tty == NULL)
  11.800 +		xencons_tty = tty;
  11.801 +	__xencons_tx_flush();
   11.802 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.803  
  11.804 -    return 0;
  11.805 +	return 0;
  11.806  }
  11.807  
  11.808  static void xencons_close(struct tty_struct *tty, struct file *filp)
  11.809  {
  11.810 -    unsigned long flags;
  11.811 +	unsigned long flags;
  11.812  
  11.813 -    if ( TTY_INDEX(tty) != 0 )
  11.814 -        return;
  11.815 +	if (TTY_INDEX(tty) != 0)
  11.816 +		return;
  11.817  
  11.818 -    if ( tty->count == 1 )
  11.819 -    {
  11.820 -        tty->closing = 1;
  11.821 -        tty_wait_until_sent(tty, 0);
  11.822 -        if ( DRV(tty->driver)->flush_buffer != NULL )
  11.823 -            DRV(tty->driver)->flush_buffer(tty);
  11.824 -        if ( tty->ldisc.flush_buffer != NULL )
  11.825 -            tty->ldisc.flush_buffer(tty);
  11.826 -        tty->closing = 0;
  11.827 -        spin_lock_irqsave(&xencons_lock, flags);
  11.828 -        xencons_tty = NULL;
  11.829 -        spin_unlock_irqrestore(&xencons_lock, flags);    
  11.830 -    }
  11.831 +	if (tty->count == 1) {
  11.832 +		tty->closing = 1;
  11.833 +		tty_wait_until_sent(tty, 0);
  11.834 +		if (DRV(tty->driver)->flush_buffer != NULL)
  11.835 +			DRV(tty->driver)->flush_buffer(tty);
  11.836 +		if (tty->ldisc.flush_buffer != NULL)
  11.837 +			tty->ldisc.flush_buffer(tty);
  11.838 +		tty->closing = 0;
  11.839 +		spin_lock_irqsave(&xencons_lock, flags);
  11.840 +		xencons_tty = NULL;
   11.841 +		spin_unlock_irqrestore(&xencons_lock, flags);
  11.842 +	}
  11.843  }
  11.844  
  11.845  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.846  static struct tty_operations xencons_ops = {
  11.847 -    .open = xencons_open,
  11.848 -    .close = xencons_close,
  11.849 -    .write = xencons_write,
  11.850 -    .write_room = xencons_write_room,
  11.851 -    .put_char = xencons_put_char,
  11.852 -    .flush_chars = xencons_flush_chars,
  11.853 -    .chars_in_buffer = xencons_chars_in_buffer,
  11.854 -    .send_xchar = xencons_send_xchar,
  11.855 -    .flush_buffer = xencons_flush_buffer,
  11.856 -    .throttle = xencons_throttle,
  11.857 -    .unthrottle = xencons_unthrottle,
  11.858 -    .wait_until_sent = xencons_wait_until_sent,
  11.859 +	.open = xencons_open,
  11.860 +	.close = xencons_close,
  11.861 +	.write = xencons_write,
  11.862 +	.write_room = xencons_write_room,
  11.863 +	.put_char = xencons_put_char,
  11.864 +	.flush_chars = xencons_flush_chars,
  11.865 +	.chars_in_buffer = xencons_chars_in_buffer,
  11.866 +	.send_xchar = xencons_send_xchar,
  11.867 +	.flush_buffer = xencons_flush_buffer,
  11.868 +	.throttle = xencons_throttle,
  11.869 +	.unthrottle = xencons_unthrottle,
  11.870 +	.wait_until_sent = xencons_wait_until_sent,
  11.871  };
  11.872  
  11.873  #ifdef CONFIG_XEN_PRIVILEGED_GUEST
  11.874  static const char *xennullcon_startup(void)
  11.875  {
  11.876 -    return NULL;
  11.877 +	return NULL;
  11.878  }
  11.879  
  11.880  static int xennullcon_dummy(void)
  11.881  {
  11.882 -    return 0;
  11.883 +	return 0;
  11.884  }
  11.885  
  11.886  #define DUMMY (void *)xennullcon_dummy
  11.887 @@ -672,122 +665,128 @@ static int xennullcon_dummy(void)
  11.888   */
  11.889  
  11.890  const struct consw xennull_con = {
  11.891 -    .owner =		THIS_MODULE,
  11.892 -    .con_startup =	xennullcon_startup,
  11.893 -    .con_init =		DUMMY,
  11.894 -    .con_deinit =	DUMMY,
  11.895 -    .con_clear =	DUMMY,
  11.896 -    .con_putc =		DUMMY,
  11.897 -    .con_putcs =	DUMMY,
  11.898 -    .con_cursor =	DUMMY,
  11.899 -    .con_scroll =	DUMMY,
  11.900 -    .con_bmove =	DUMMY,
  11.901 -    .con_switch =	DUMMY,
  11.902 -    .con_blank =	DUMMY,
  11.903 -    .con_font_set =	DUMMY,
  11.904 -    .con_font_get =	DUMMY,
  11.905 -    .con_font_default =	DUMMY,
  11.906 -    .con_font_copy =	DUMMY,
  11.907 -    .con_set_palette =	DUMMY,
  11.908 -    .con_scrolldelta =	DUMMY,
   11.909 +	.owner =		THIS_MODULE,
   11.910 +	.con_startup =		xennullcon_startup,
   11.911 +	.con_init =		DUMMY,
   11.912 +	.con_deinit =		DUMMY,
   11.913 +	.con_clear =		DUMMY,
   11.914 +	.con_putc =		DUMMY,
   11.915 +	.con_putcs =		DUMMY,
   11.916 +	.con_cursor =		DUMMY,
   11.917 +	.con_scroll =		DUMMY,
   11.918 +	.con_bmove =		DUMMY,
   11.919 +	.con_switch =		DUMMY,
   11.920 +	.con_blank =		DUMMY,
   11.921 +	.con_font_set =		DUMMY,
   11.922 +	.con_font_get =		DUMMY,
   11.923 +	.con_font_default =	DUMMY,
   11.924 +	.con_font_copy =	DUMMY,
   11.925 +	.con_set_palette =	DUMMY,
   11.926 +	.con_scrolldelta =	DUMMY,
  11.927  };
  11.928  #endif
  11.929  #endif
  11.930  
  11.931  static int __init xencons_init(void)
  11.932  {
  11.933 -    int rc;
  11.934 +	int rc;
  11.935  
  11.936 -    if ( xc_mode == XC_OFF )
  11.937 -        return 0;
  11.938 +	if (xc_mode == XC_OFF)
  11.939 +		return 0;
  11.940  
  11.941 -    xencons_ring_init();
  11.942 +	xencons_ring_init();
  11.943  
  11.944  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.945 -    xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
  11.946 -                                      1 : MAX_NR_CONSOLES);
  11.947 -    if ( xencons_driver == NULL )
  11.948 -        return -ENOMEM;
   11.949 +	xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ?
  11.950 +					  1 : MAX_NR_CONSOLES);
  11.951 +	if (xencons_driver == NULL)
  11.952 +		return -ENOMEM;
  11.953  #else
  11.954 -    memset(&xencons_driver, 0, sizeof(struct tty_driver));
  11.955 -    xencons_driver.magic       = TTY_DRIVER_MAGIC;
  11.956 -    xencons_driver.refcount    = &xencons_refcount;
  11.957 -    xencons_driver.table       = xencons_table;
  11.958 -    xencons_driver.num         = (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
  11.959 +	memset(&xencons_driver, 0, sizeof(struct tty_driver));
  11.960 +	xencons_driver.magic       = TTY_DRIVER_MAGIC;
  11.961 +	xencons_driver.refcount    = &xencons_refcount;
  11.962 +	xencons_driver.table       = xencons_table;
  11.963 +	xencons_driver.num         =
  11.964 +		(xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
  11.965  #endif
  11.966  
  11.967 -    DRV(xencons_driver)->major           = TTY_MAJOR;
  11.968 -    DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
  11.969 -    DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
  11.970 -    DRV(xencons_driver)->init_termios    = tty_std_termios;
  11.971 -    DRV(xencons_driver)->flags           = 
  11.972 -        TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
  11.973 -    DRV(xencons_driver)->termios         = xencons_termios;
  11.974 -    DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
  11.975 +	DRV(xencons_driver)->major           = TTY_MAJOR;
  11.976 +	DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
  11.977 +	DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
  11.978 +	DRV(xencons_driver)->init_termios    = tty_std_termios;
   11.979 +	DRV(xencons_driver)->flags           =
  11.980 +		TTY_DRIVER_REAL_RAW |
  11.981 +		TTY_DRIVER_RESET_TERMIOS |
  11.982 +		TTY_DRIVER_NO_DEVFS;
  11.983 +	DRV(xencons_driver)->termios         = xencons_termios;
  11.984 +	DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
  11.985  
  11.986 -    if ( xc_mode == XC_SERIAL )
  11.987 -    {
  11.988 -        DRV(xencons_driver)->name        = "ttyS";
  11.989 -        DRV(xencons_driver)->minor_start = 64 + xc_num;
  11.990 -        DRV(xencons_driver)->name_base   = 0 + xc_num;
  11.991 -    }
  11.992 -    else
  11.993 -    {
  11.994 -        DRV(xencons_driver)->name        = "tty";
  11.995 -        DRV(xencons_driver)->minor_start = xc_num;
  11.996 -        DRV(xencons_driver)->name_base   = xc_num;
  11.997 -    }
  11.998 +	if (xc_mode == XC_SERIAL)
  11.999 +	{
 11.1000 +		DRV(xencons_driver)->name        = "ttyS";
 11.1001 +		DRV(xencons_driver)->minor_start = 64 + xc_num;
 11.1002 +		DRV(xencons_driver)->name_base   = 0 + xc_num;
 11.1003 +	} else {
 11.1004 +		DRV(xencons_driver)->name        = "tty";
 11.1005 +		DRV(xencons_driver)->minor_start = xc_num;
 11.1006 +		DRV(xencons_driver)->name_base   = xc_num;
 11.1007 +	}
 11.1008  
 11.1009  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 11.1010 -    tty_set_operations(xencons_driver, &xencons_ops);
 11.1011 +	tty_set_operations(xencons_driver, &xencons_ops);
 11.1012  #else
 11.1013 -    xencons_driver.open            = xencons_open;
 11.1014 -    xencons_driver.close           = xencons_close;
 11.1015 -    xencons_driver.write           = xencons_write;
 11.1016 -    xencons_driver.write_room      = xencons_write_room;
 11.1017 -    xencons_driver.put_char        = xencons_put_char;
 11.1018 -    xencons_driver.flush_chars     = xencons_flush_chars;
 11.1019 -    xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
 11.1020 -    xencons_driver.send_xchar      = xencons_send_xchar;
 11.1021 -    xencons_driver.flush_buffer    = xencons_flush_buffer;
 11.1022 -    xencons_driver.throttle        = xencons_throttle;
 11.1023 -    xencons_driver.unthrottle      = xencons_unthrottle;
 11.1024 -    xencons_driver.wait_until_sent = xencons_wait_until_sent;
 11.1025 +	xencons_driver.open            = xencons_open;
 11.1026 +	xencons_driver.close           = xencons_close;
 11.1027 +	xencons_driver.write           = xencons_write;
 11.1028 +	xencons_driver.write_room      = xencons_write_room;
 11.1029 +	xencons_driver.put_char        = xencons_put_char;
 11.1030 +	xencons_driver.flush_chars     = xencons_flush_chars;
 11.1031 +	xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
 11.1032 +	xencons_driver.send_xchar      = xencons_send_xchar;
 11.1033 +	xencons_driver.flush_buffer    = xencons_flush_buffer;
 11.1034 +	xencons_driver.throttle        = xencons_throttle;
 11.1035 +	xencons_driver.unthrottle      = xencons_unthrottle;
 11.1036 +	xencons_driver.wait_until_sent = xencons_wait_until_sent;
 11.1037  #endif
 11.1038  
 11.1039 -    if ( (rc = tty_register_driver(DRV(xencons_driver))) != 0 )
 11.1040 -    {
 11.1041 -        printk("WARNING: Failed to register Xen virtual "
 11.1042 -               "console driver as '%s%d'\n",
 11.1043 -               DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
 11.1044 +	if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
 11.1045 +		printk("WARNING: Failed to register Xen virtual "
 11.1046 +		       "console driver as '%s%d'\n",
 11.1047 +		       DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
 11.1048  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 11.1049 -        put_tty_driver(xencons_driver);
 11.1050 -        xencons_driver = NULL;
 11.1051 +		put_tty_driver(xencons_driver);
 11.1052 +		xencons_driver = NULL;
 11.1053  #endif
 11.1054 -        return rc;
 11.1055 -    }
 11.1056 +		return rc;
 11.1057 +	}
 11.1058  
 11.1059  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 11.1060 -    tty_register_device(xencons_driver, 0, NULL);
 11.1061 +	tty_register_device(xencons_driver, 0, NULL);
 11.1062  #endif
 11.1063  
 11.1064 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
 11.1065 -    {
 11.1066 -        xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
 11.1067 -        (void)request_irq(xencons_priv_irq,
 11.1068 -                          xencons_priv_interrupt, 0, "console", NULL);
 11.1069 -    }
 11.1070 -    else
 11.1071 -    {
 11.1072 -	
 11.1073 -	xencons_ring_register_receiver(xencons_rx);
 11.1074 -    }
 11.1075 +	if (xen_start_info->flags & SIF_INITDOMAIN) {
 11.1076 +		xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
 11.1077 +		(void)request_irq(xencons_priv_irq,
 11.1078 +				  xencons_priv_interrupt, 0, "console", NULL);
 11.1079 +	} else {
 11.1080 +		xencons_ring_register_receiver(xencons_rx);
 11.1081 +	}
 11.1082  
 11.1083 -    printk("Xen virtual console successfully installed as %s%d\n",
 11.1084 -           DRV(xencons_driver)->name,
 11.1085 -           DRV(xencons_driver)->name_base );
 11.1086 +	printk("Xen virtual console successfully installed as %s%d\n",
 11.1087 +	       DRV(xencons_driver)->name,
  11.1088 +	       DRV(xencons_driver)->name_base);
 11.1089      
 11.1090 -    return 0;
 11.1091 +	return 0;
 11.1092  }
 11.1093  
 11.1094  module_init(xencons_init);
 11.1095 +
 11.1096 +/*
 11.1097 + * Local variables:
 11.1098 + *  c-file-style: "linux"
 11.1099 + *  indent-tabs-mode: t
 11.1100 + *  c-indent-level: 8
 11.1101 + *  c-basic-offset: 8
 11.1102 + *  tab-width: 8
 11.1103 + * End:
 11.1104 + */
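
Throughout console.c the transmit path uses a single ring buffer: wc and wp are free-running indices, WBUF_MASK reduces them modulo the power-of-two wbuf_size, and the fill level is always wp - wc even after the counters wrap. A standalone sketch of that arithmetic follows; the boot-time alloc_bootmem allocation is replaced by a fixed array purely for illustration.

	/* Illustrative sketch only; not part of the changeset. */
	#include <stdio.h>

	static unsigned wbuf_size = 4096;	/* must stay a power of two */
	#define WBUF_MASK(_i) ((_i) & (wbuf_size - 1))

	static char wbuf[1 << 16];		/* stand-in for alloc_bootmem() */
	static unsigned wc, wp;			/* free-running consume/produce */

	/* Round the size up to a power of two, as xencons_bufsz_setup does.
	   In this sketch the goal must not exceed sizeof(wbuf). */
	static void set_bufsz(unsigned goal)
	{
		while (wbuf_size < goal)
			wbuf_size <<= 1;
	}

	/* Queue one character; returns 0 when the ring is full. */
	static int put_char(char ch)
	{
		if ((wp - wc) == wbuf_size)
			return 0;
		wbuf[WBUF_MASK(wp++)] = ch;
		return 1;
	}

	int main(void)
	{
		const char *s = "xen\n";
		int i;

		set_bufsz(6000);		/* rounds up to 8192 */
		for (i = 0; s[i] != '\0'; i++)
			put_char(s[i]);
		printf("buffered %u of %u bytes\n", wp - wc, wbuf_size);
		return 0;
	}
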
    12.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Thu Sep 22 14:01:01 2005 +0100
    12.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Thu Sep 22 14:04:14 2005 +0100
    12.3 @@ -125,3 +125,13 @@ void xencons_resume(void)
    12.4  
    12.5  	(void)xencons_ring_init();
    12.6  }
    12.7 +
    12.8 +/*
    12.9 + * Local variables:
   12.10 + *  c-file-style: "linux"
   12.11 + *  indent-tabs-mode: t
   12.12 + *  c-indent-level: 8
   12.13 + *  c-basic-offset: 8
   12.14 + *  tab-width: 8
   12.15 + * End:
   12.16 + */
    13.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h	Thu Sep 22 14:01:01 2005 +0100
    13.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h	Thu Sep 22 14:04:14 2005 +0100
    13.3 @@ -3,12 +3,21 @@
    13.4  
    13.5  asmlinkage int xprintk(const char *fmt, ...);
    13.6  
    13.7 -
    13.8  int xencons_ring_init(void);
    13.9  int xencons_ring_send(const char *data, unsigned len);
   13.10  
   13.11 -typedef void (xencons_receiver_func)(char *buf, unsigned len, 
   13.12 -                                     struct pt_regs *regs);
   13.13 +typedef void (xencons_receiver_func)(
   13.14 +	char *buf, unsigned len, struct pt_regs *regs);
   13.15  void xencons_ring_register_receiver(xencons_receiver_func *f);
   13.16  
   13.17  #endif /* _XENCONS_RING_H */
   13.18 +
   13.19 +/*
   13.20 + * Local variables:
   13.21 + *  c-file-style: "linux"
   13.22 + *  indent-tabs-mode: t
   13.23 + *  c-indent-level: 8
   13.24 + *  c-basic-offset: 8
   13.25 + *  tab-width: 8
   13.26 + * End:
   13.27 + */
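
xencons_ring.h couples the ring to its consumer through a single registered callback of type xencons_receiver_func, as used by xencons_init above. The registration pattern is sketched below in freestanding form: struct pt_regs is only forward-declared here, and the deliver() loop is a hypothetical stand-in for the real ring interrupt handler.

	/* Illustrative sketch only; not part of the changeset. */
	#include <stdio.h>

	struct pt_regs;				/* kernel type, opaque here */

	typedef void (xencons_receiver_func)(
		char *buf, unsigned len, struct pt_regs *regs);

	static xencons_receiver_func *xencons_receiver;

	static void xencons_ring_register_receiver(xencons_receiver_func *f)
	{
		xencons_receiver = f;
	}

	/* Stand-in for what the ring ISR does when bytes arrive. */
	static void deliver(char *buf, unsigned len)
	{
		if (xencons_receiver != NULL)
			xencons_receiver(buf, len, NULL);
	}

	static void my_rx(char *buf, unsigned len, struct pt_regs *regs)
	{
		(void)regs;
		printf("got %u byte(s): %.*s\n", len, (int)len, buf);
	}

	int main(void)
	{
		char data[] = "hello";
		xencons_ring_register_receiver(my_rx);
		deliver(data, sizeof(data) - 1);
		return 0;
	}
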
    14.1 --- a/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c	Thu Sep 22 14:01:01 2005 +0100
    14.2 +++ b/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c	Thu Sep 22 14:04:14 2005 +0100
    14.3 @@ -1,9 +1,9 @@
    14.4  /******************************************************************************
    14.5   * evtchn.c
    14.6   * 
    14.7 - * Xenolinux driver for receiving and demuxing event-channel signals.
    14.8 + * Driver for receiving and demuxing event-channel signals.
    14.9   * 
   14.10 - * Copyright (c) 2004, K A Fraser
   14.11 + * Copyright (c) 2004-2005, K A Fraser
   14.12   * Multi-process extensions Copyright (c) 2004, Steven Smith
   14.13   * 
   14.14   * This file may be distributed separately from the Linux kernel, or
   14.15 @@ -46,29 +46,18 @@
   14.16  #include <linux/init.h>
   14.17  #define XEN_EVTCHN_MASK_OPS
   14.18  #include <asm-xen/evtchn.h>
   14.19 -
   14.20 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   14.21 -#include <linux/devfs_fs_kernel.h>
   14.22 -#define OLD_DEVFS
   14.23 -#else
   14.24  #include <linux/gfp.h>
   14.25 -#endif
   14.26 -
   14.27 -#ifdef OLD_DEVFS
   14.28 -/* NB. This must be shared amongst drivers if more things go in /dev/xen */
   14.29 -static devfs_handle_t xen_dev_dir;
   14.30 -#endif
   14.31  
   14.32  struct per_user_data {
   14.33 -    /* Notification ring, accessed via /dev/xen/evtchn. */
   14.34 -#   define EVTCHN_RING_SIZE     2048  /* 2048 16-bit entries */
   14.35 -#   define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
   14.36 -    u16 *ring;
   14.37 -    unsigned int ring_cons, ring_prod, ring_overflow;
   14.38 +	/* Notification ring, accessed via /dev/xen/evtchn. */
   14.39 +#define EVTCHN_RING_SIZE     2048  /* 2048 16-bit entries */
   14.40 +#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
   14.41 +	u16 *ring;
   14.42 +	unsigned int ring_cons, ring_prod, ring_overflow;
   14.43  
   14.44 -    /* Processes wait on this queue when ring is empty. */
   14.45 -    wait_queue_head_t evtchn_wait;
   14.46 -    struct fasync_struct *evtchn_async_queue;
   14.47 +	/* Processes wait on this queue when ring is empty. */
   14.48 +	wait_queue_head_t evtchn_wait;
   14.49 +	struct fasync_struct *evtchn_async_queue;
   14.50  };
   14.51  
   14.52  /* Who's bound to each port? */
   14.53 @@ -77,356 +66,310 @@ static spinlock_t port_user_lock;
   14.54  
   14.55  void evtchn_device_upcall(int port)
   14.56  {
   14.57 -    struct per_user_data *u;
   14.58 +	struct per_user_data *u;
   14.59  
   14.60 -    spin_lock(&port_user_lock);
   14.61 +	spin_lock(&port_user_lock);
   14.62  
   14.63 -    mask_evtchn(port);
   14.64 -    clear_evtchn(port);
   14.65 +	mask_evtchn(port);
   14.66 +	clear_evtchn(port);
   14.67  
   14.68 -    if ( (u = port_user[port]) != NULL )
   14.69 -    {
   14.70 -        if ( (u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE )
   14.71 -        {
   14.72 -            u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
   14.73 -            if ( u->ring_cons == u->ring_prod++ )
   14.74 -            {
   14.75 -                wake_up_interruptible(&u->evtchn_wait);
   14.76 -                kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
   14.77 -            }
   14.78 -        }
   14.79 -        else
   14.80 -        {
   14.81 -            u->ring_overflow = 1;
   14.82 -        }
   14.83 -    }
   14.84 +	if ((u = port_user[port]) != NULL) {
   14.85 +		if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
   14.86 +			u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
   14.87 +			if (u->ring_cons == u->ring_prod++) {
   14.88 +				wake_up_interruptible(&u->evtchn_wait);
   14.89 +				kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
   14.90 +			}
   14.91 +		} else {
   14.92 +			u->ring_overflow = 1;
   14.93 +		}
   14.94 +	}
   14.95  
   14.96 -    spin_unlock(&port_user_lock);
   14.97 +	spin_unlock(&port_user_lock);
   14.98  }
   14.99  
  14.100  static ssize_t evtchn_read(struct file *file, char *buf,
  14.101                             size_t count, loff_t *ppos)
  14.102  {
  14.103 -    int rc;
  14.104 -    unsigned int c, p, bytes1 = 0, bytes2 = 0;
  14.105 -    DECLARE_WAITQUEUE(wait, current);
  14.106 -    struct per_user_data *u = file->private_data;
  14.107 +	int rc;
  14.108 +	unsigned int c, p, bytes1 = 0, bytes2 = 0;
  14.109 +	DECLARE_WAITQUEUE(wait, current);
  14.110 +	struct per_user_data *u = file->private_data;
  14.111  
  14.112 -    add_wait_queue(&u->evtchn_wait, &wait);
  14.113 -
  14.114 -    count &= ~1; /* even number of bytes */
  14.115 +	add_wait_queue(&u->evtchn_wait, &wait);
  14.116  
  14.117 -    if ( count == 0 )
  14.118 -    {
  14.119 -        rc = 0;
  14.120 -        goto out;
  14.121 -    }
  14.122 +	count &= ~1; /* even number of bytes */
  14.123  
  14.124 -    if ( count > PAGE_SIZE )
  14.125 -        count = PAGE_SIZE;
  14.126 +	if (count == 0) {
  14.127 +		rc = 0;
  14.128 +		goto out;
  14.129 +	}
  14.130  
  14.131 -    for ( ; ; )
  14.132 -    {
  14.133 -        set_current_state(TASK_INTERRUPTIBLE);
  14.134 +	if (count > PAGE_SIZE)
  14.135 +		count = PAGE_SIZE;
  14.136  
  14.137 -        if ( (c = u->ring_cons) != (p = u->ring_prod) )
  14.138 -            break;
  14.139 +	for (;;) {
  14.140 +		set_current_state(TASK_INTERRUPTIBLE);
  14.141 +
  14.142 +		if ((c = u->ring_cons) != (p = u->ring_prod))
  14.143 +			break;
  14.144  
  14.145 -        if ( u->ring_overflow )
  14.146 -        {
  14.147 -            rc = -EFBIG;
  14.148 -            goto out;
  14.149 -        }
  14.150 +		if (u->ring_overflow) {
  14.151 +			rc = -EFBIG;
  14.152 +			goto out;
  14.153 +		}
  14.154  
  14.155 -        if ( file->f_flags & O_NONBLOCK )
  14.156 -        {
  14.157 -            rc = -EAGAIN;
  14.158 -            goto out;
  14.159 -        }
  14.160 +		if (file->f_flags & O_NONBLOCK) {
  14.161 +			rc = -EAGAIN;
  14.162 +			goto out;
  14.163 +		}
  14.164  
  14.165 -        if ( signal_pending(current) )
  14.166 -        {
  14.167 -            rc = -ERESTARTSYS;
  14.168 -            goto out;
  14.169 -        }
  14.170 +		if (signal_pending(current)) {
  14.171 +			rc = -ERESTARTSYS;
  14.172 +			goto out;
  14.173 +		}
  14.174  
  14.175 -        schedule();
  14.176 -    }
  14.177 +		schedule();
  14.178 +	}
  14.179  
  14.180 -    /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
  14.181 -    if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 )
  14.182 -    {
  14.183 -        bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(u16);
  14.184 -        bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
  14.185 -    }
  14.186 -    else
  14.187 -    {
  14.188 -        bytes1 = (p - c) * sizeof(u16);
  14.189 -        bytes2 = 0;
  14.190 -    }
  14.191 +	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
  14.192 +	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
  14.193 +		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
  14.194 +			sizeof(u16);
  14.195 +		bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
  14.196 +	} else {
  14.197 +		bytes1 = (p - c) * sizeof(u16);
  14.198 +		bytes2 = 0;
  14.199 +	}
  14.200  
  14.201 -    /* Truncate chunks according to caller's maximum byte count. */
  14.202 -    if ( bytes1 > count )
  14.203 -    {
  14.204 -        bytes1 = count;
  14.205 -        bytes2 = 0;
  14.206 -    }
  14.207 -    else if ( (bytes1 + bytes2) > count )
  14.208 -    {
  14.209 -        bytes2 = count - bytes1;
  14.210 -    }
  14.211 +	/* Truncate chunks according to caller's maximum byte count. */
  14.212 +	if (bytes1 > count) {
  14.213 +		bytes1 = count;
  14.214 +		bytes2 = 0;
  14.215 +	} else if ((bytes1 + bytes2) > count) {
  14.216 +		bytes2 = count - bytes1;
  14.217 +	}
  14.218  
  14.219 -    if ( copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
  14.220 -         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) )
  14.221 -    {
  14.222 -        rc = -EFAULT;
  14.223 -        goto out;
  14.224 -    }
  14.225 +	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
  14.226 +	    ((bytes2 != 0) &&
  14.227 +	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) {
  14.228 +		rc = -EFAULT;
  14.229 +		goto out;
  14.230 +	}
  14.231  
  14.232 -    u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
  14.233 +	u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
  14.234  
  14.235 -    rc = bytes1 + bytes2;
  14.236 +	rc = bytes1 + bytes2;
  14.237  
  14.238   out:
  14.239 -    __set_current_state(TASK_RUNNING);
  14.240 -    remove_wait_queue(&u->evtchn_wait, &wait);
  14.241 -    return rc;
  14.242 +	__set_current_state(TASK_RUNNING);
  14.243 +	remove_wait_queue(&u->evtchn_wait, &wait);
  14.244 +	return rc;
  14.245  }
  14.246  
  14.247  static ssize_t evtchn_write(struct file *file, const char *buf,
  14.248                              size_t count, loff_t *ppos)
  14.249  {
  14.250 -    int  rc, i;
  14.251 -    u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
  14.252 -    struct per_user_data *u = file->private_data;
  14.253 +	int  rc, i;
  14.254 +	u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
  14.255 +	struct per_user_data *u = file->private_data;
  14.256  
  14.257 -    if ( kbuf == NULL )
  14.258 -        return -ENOMEM;
  14.259 +	if (kbuf == NULL)
  14.260 +		return -ENOMEM;
  14.261  
  14.262 -    count &= ~1; /* even number of bytes */
  14.263 +	count &= ~1; /* even number of bytes */
  14.264  
  14.265 -    if ( count == 0 )
  14.266 -    {
  14.267 -        rc = 0;
  14.268 -        goto out;
  14.269 -    }
  14.270 +	if (count == 0) {
  14.271 +		rc = 0;
  14.272 +		goto out;
  14.273 +	}
  14.274  
  14.275 -    if ( count > PAGE_SIZE )
  14.276 -        count = PAGE_SIZE;
  14.277 +	if (count > PAGE_SIZE)
  14.278 +		count = PAGE_SIZE;
  14.279 +
  14.280 +	if (copy_from_user(kbuf, buf, count) != 0) {
  14.281 +		rc = -EFAULT;
  14.282 +		goto out;
  14.283 +	}
  14.284  
  14.285 -    if ( copy_from_user(kbuf, buf, count) != 0 )
  14.286 -    {
  14.287 -        rc = -EFAULT;
  14.288 -        goto out;
  14.289 -    }
  14.290 +	spin_lock_irq(&port_user_lock);
  14.291 +	for (i = 0; i < (count/2); i++)
  14.292 +		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
  14.293 +			unmask_evtchn(kbuf[i]);
  14.294 +	spin_unlock_irq(&port_user_lock);
  14.295  
  14.296 -    spin_lock_irq(&port_user_lock);
  14.297 -    for ( i = 0; i < (count/2); i++ )
  14.298 -        if ( (kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u) )
  14.299 -            unmask_evtchn(kbuf[i]);
  14.300 -    spin_unlock_irq(&port_user_lock);
  14.301 -
  14.302 -    rc = count;
  14.303 +	rc = count;
  14.304  
  14.305   out:
  14.306 -    free_page((unsigned long)kbuf);
  14.307 -    return rc;
  14.308 +	free_page((unsigned long)kbuf);
  14.309 +	return rc;
  14.310  }
  14.311  
  14.312  static int evtchn_ioctl(struct inode *inode, struct file *file,
  14.313                          unsigned int cmd, unsigned long arg)
  14.314  {
  14.315 -    int rc = 0;
  14.316 -    struct per_user_data *u = file->private_data;
  14.317 +	int rc = 0;
  14.318 +	struct per_user_data *u = file->private_data;
  14.319  
  14.320 -    spin_lock_irq(&port_user_lock);
  14.321 +	spin_lock_irq(&port_user_lock);
  14.322      
  14.323 -    switch ( cmd )
  14.324 -    {
  14.325 -    case EVTCHN_RESET:
  14.326 -        /* Initialise the ring to empty. Clear errors. */
  14.327 -        u->ring_cons = u->ring_prod = u->ring_overflow = 0;
  14.328 -        break;
  14.329 +	switch (cmd) {
  14.330 +	case EVTCHN_RESET:
  14.331 +		/* Initialise the ring to empty. Clear errors. */
  14.332 +		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
  14.333 +		break;
  14.334  
  14.335 -    case EVTCHN_BIND:
  14.336 -        if ( arg >= NR_EVENT_CHANNELS )
  14.337 -        {
  14.338 -            rc = -EINVAL;
  14.339 -        }
  14.340 -        else if ( port_user[arg] != NULL )
  14.341 -        {
  14.342 -            rc = -EISCONN;
  14.343 -        }
  14.344 -        else
  14.345 -        {
  14.346 -            port_user[arg] = u;
  14.347 -            unmask_evtchn(arg);
  14.348 -        }
  14.349 -        break;
  14.350 +	case EVTCHN_BIND:
  14.351 +		if (arg >= NR_EVENT_CHANNELS) {
  14.352 +			rc = -EINVAL;
  14.353 +		} else if (port_user[arg] != NULL) {
  14.354 +			rc = -EISCONN;
  14.355 +		} else {
  14.356 +			port_user[arg] = u;
  14.357 +			unmask_evtchn(arg);
  14.358 +		}
  14.359 +		break;
  14.360  
  14.361 -    case EVTCHN_UNBIND:
  14.362 -        if ( arg >= NR_EVENT_CHANNELS )
  14.363 -        {
  14.364 -            rc = -EINVAL;
  14.365 -        }
  14.366 -        else if ( port_user[arg] != u )
  14.367 -        {
  14.368 -            rc = -ENOTCONN;
  14.369 -        }
  14.370 -        else
  14.371 -        {
  14.372 -            port_user[arg] = NULL;
  14.373 -            mask_evtchn(arg);
  14.374 -        }
  14.375 -        break;
  14.376 +	case EVTCHN_UNBIND:
  14.377 +		if (arg >= NR_EVENT_CHANNELS) {
  14.378 +			rc = -EINVAL;
  14.379 +		} else if (port_user[arg] != u) {
  14.380 +			rc = -ENOTCONN;
  14.381 +		} else {
  14.382 +			port_user[arg] = NULL;
  14.383 +			mask_evtchn(arg);
  14.384 +		}
  14.385 +		break;
  14.386  
  14.387 -    default:
  14.388 -        rc = -ENOSYS;
  14.389 -        break;
  14.390 -    }
  14.391 +	default:
  14.392 +		rc = -ENOSYS;
  14.393 +		break;
  14.394 +	}
  14.395  
  14.396 -    spin_unlock_irq(&port_user_lock);   
   14.397 +	spin_unlock_irq(&port_user_lock);
  14.398  
  14.399 -    return rc;
  14.400 +	return rc;
  14.401  }
  14.402  
  14.403  static unsigned int evtchn_poll(struct file *file, poll_table *wait)
  14.404  {
  14.405 -    unsigned int mask = POLLOUT | POLLWRNORM;
  14.406 -    struct per_user_data *u = file->private_data;
  14.407 +	unsigned int mask = POLLOUT | POLLWRNORM;
  14.408 +	struct per_user_data *u = file->private_data;
  14.409  
  14.410 -    poll_wait(file, &u->evtchn_wait, wait);
  14.411 -    if ( u->ring_cons != u->ring_prod )
  14.412 -        mask |= POLLIN | POLLRDNORM;
  14.413 -    if ( u->ring_overflow )
  14.414 -        mask = POLLERR;
  14.415 -    return mask;
  14.416 +	poll_wait(file, &u->evtchn_wait, wait);
  14.417 +	if (u->ring_cons != u->ring_prod)
  14.418 +		mask |= POLLIN | POLLRDNORM;
  14.419 +	if (u->ring_overflow)
  14.420 +		mask = POLLERR;
  14.421 +	return mask;
  14.422  }
  14.423  
  14.424  static int evtchn_fasync(int fd, struct file *filp, int on)
  14.425  {
  14.426 -    struct per_user_data *u = filp->private_data;
  14.427 -    return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
  14.428 +	struct per_user_data *u = filp->private_data;
  14.429 +	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
  14.430  }
  14.431  
  14.432  static int evtchn_open(struct inode *inode, struct file *filp)
  14.433  {
  14.434 -    struct per_user_data *u;
  14.435 +	struct per_user_data *u;
  14.436  
  14.437 -    if ( (u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL )
  14.438 -        return -ENOMEM;
  14.439 +	if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
  14.440 +		return -ENOMEM;
  14.441  
  14.442 -    memset(u, 0, sizeof(*u));
  14.443 -    init_waitqueue_head(&u->evtchn_wait);
  14.444 +	memset(u, 0, sizeof(*u));
  14.445 +	init_waitqueue_head(&u->evtchn_wait);
  14.446  
  14.447 -    if ( (u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
  14.448 -    {
  14.449 -        kfree(u);
  14.450 -        return -ENOMEM;
  14.451 -    }
  14.452 +	if ((u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL)
  14.453 +	{
  14.454 +		kfree(u);
  14.455 +		return -ENOMEM;
  14.456 +	}
  14.457  
  14.458 -    filp->private_data = u;
  14.459 +	filp->private_data = u;
  14.460  
  14.461 -    return 0;
  14.462 +	return 0;
  14.463  }
  14.464  
  14.465  static int evtchn_release(struct inode *inode, struct file *filp)
  14.466  {
  14.467 -    int i;
  14.468 -    struct per_user_data *u = filp->private_data;
  14.469 +	int i;
  14.470 +	struct per_user_data *u = filp->private_data;
  14.471  
  14.472 -    spin_lock_irq(&port_user_lock);
  14.473 +	spin_lock_irq(&port_user_lock);
  14.474  
  14.475 -    free_page((unsigned long)u->ring);
  14.476 +	free_page((unsigned long)u->ring);
  14.477  
  14.478 -    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
  14.479 -    {
  14.480 -        if ( port_user[i] == u )
  14.481 -        {
  14.482 -            port_user[i] = NULL;
  14.483 -            mask_evtchn(i);
  14.484 -        }
  14.485 -    }
  14.486 +	for (i = 0; i < NR_EVENT_CHANNELS; i++)
  14.487 +	{
  14.488 +		if (port_user[i] == u)
  14.489 +		{
  14.490 +			port_user[i] = NULL;
  14.491 +			mask_evtchn(i);
  14.492 +		}
  14.493 +	}
  14.494  
  14.495 -    spin_unlock_irq(&port_user_lock);
  14.496 +	spin_unlock_irq(&port_user_lock);
  14.497  
  14.498 -    kfree(u);
  14.499 +	kfree(u);
  14.500  
  14.501 -    return 0;
  14.502 +	return 0;
  14.503  }
  14.504  
  14.505  static struct file_operations evtchn_fops = {
  14.506 -    .owner   = THIS_MODULE,
  14.507 -    .read    = evtchn_read,
  14.508 -    .write   = evtchn_write,
  14.509 -    .ioctl   = evtchn_ioctl,
  14.510 -    .poll    = evtchn_poll,
  14.511 -    .fasync  = evtchn_fasync,
  14.512 -    .open    = evtchn_open,
  14.513 -    .release = evtchn_release,
  14.514 +	.owner   = THIS_MODULE,
  14.515 +	.read    = evtchn_read,
  14.516 +	.write   = evtchn_write,
  14.517 +	.ioctl   = evtchn_ioctl,
  14.518 +	.poll    = evtchn_poll,
  14.519 +	.fasync  = evtchn_fasync,
  14.520 +	.open    = evtchn_open,
  14.521 +	.release = evtchn_release,
  14.522  };
  14.523  
  14.524  static struct miscdevice evtchn_miscdev = {
  14.525 -    .minor        = EVTCHN_MINOR,
  14.526 -    .name         = "evtchn",
  14.527 -    .fops         = &evtchn_fops,
  14.528 +	.minor        = EVTCHN_MINOR,
  14.529 +	.name         = "evtchn",
  14.530 +	.fops         = &evtchn_fops,
  14.531  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  14.532 -    .devfs_name   = "misc/evtchn",
  14.533 +	.devfs_name   = "misc/evtchn",
  14.534  #endif
  14.535  };
  14.536  
  14.537  static int __init evtchn_init(void)
  14.538  {
  14.539 -#ifdef OLD_DEVFS
  14.540 -    devfs_handle_t symlink_handle;
  14.541 -    int            pos;
  14.542 -    char           link_dest[64];
  14.543 -#endif
  14.544 -    int err;
  14.545 -
  14.546 -    spin_lock_init(&port_user_lock);
  14.547 -    memset(port_user, 0, sizeof(port_user));
  14.548 +	int err;
  14.549  
  14.550 -    /* (DEVFS) create '/dev/misc/evtchn'. */
  14.551 -    err = misc_register(&evtchn_miscdev);
  14.552 -    if ( err != 0 )
  14.553 -    {
  14.554 -        printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
  14.555 -        return err;
  14.556 -    }
  14.557 -
  14.558 -#ifdef OLD_DEVFS
  14.559 -    /* (DEVFS) create directory '/dev/xen'. */
  14.560 -    xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
  14.561 +	spin_lock_init(&port_user_lock);
  14.562 +	memset(port_user, 0, sizeof(port_user));
  14.563  
  14.564 -    /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
  14.565 -    pos = devfs_generate_path(evtchn_miscdev.devfs_handle, 
  14.566 -                              &link_dest[3], 
  14.567 -                              sizeof(link_dest) - 3);
  14.568 -    if ( pos >= 0 )
  14.569 -        strncpy(&link_dest[pos], "../", 3);
  14.570 +	/* (DEVFS) create '/dev/misc/evtchn'. */
  14.571 +	err = misc_register(&evtchn_miscdev);
  14.572 +	if (err != 0)
  14.573 +	{
  14.574 +		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
  14.575 +		return err;
  14.576 +	}
  14.577  
  14.578 -    /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
  14.579 -    (void)devfs_mk_symlink(xen_dev_dir, 
  14.580 -                           "evtchn", 
  14.581 -                           DEVFS_FL_DEFAULT, 
  14.582 -                           &link_dest[pos],
  14.583 -                           &symlink_handle, 
  14.584 -                           NULL);
   14.585 +	printk(KERN_INFO "Event-channel device installed.\n");
  14.586  
  14.587 -    /* (DEVFS) automatically destroy the symlink with its destination. */
  14.588 -    devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
  14.589 -#endif
  14.590 -
  14.591 -    printk("Event-channel device installed.\n");
  14.592 -
  14.593 -    return 0;
  14.594 +	return 0;
  14.595  }
  14.596  
  14.597  static void evtchn_cleanup(void)
  14.598  {
  14.599 -    misc_deregister(&evtchn_miscdev);
  14.600 +	misc_deregister(&evtchn_miscdev);
  14.601  }
  14.602  
  14.603  module_init(evtchn_init);
  14.604  module_exit(evtchn_cleanup);
  14.605 +
  14.606 +/*
  14.607 + * Local variables:
  14.608 + *  c-file-style: "linux"
  14.609 + *  indent-tabs-mode: t
  14.610 + *  c-indent-level: 8
  14.611 + *  c-basic-offset: 8
  14.612 + *  tab-width: 8
  14.613 + * End:
  14.614 + */
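
For context, the evtchn hunks above reduce to the standard Linux misc-device
pattern: fill in a file_operations table, wrap it in a miscdevice, and
register it at module init. A minimal sketch of that pattern follows,
assuming a 2.6-era kernel; the example_* names are hypothetical, and only
misc_register()/misc_deregister(), struct miscdevice and struct
file_operations are real kernel interfaces here. This is not the driver's
code, just the shape it follows.

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/miscdevice.h>

	static int example_open(struct inode *inode, struct file *filp)
	{
		/* per-open state (like evtchn's per_user_data) goes here */
		return 0;
	}

	static struct file_operations example_fops = {
		.owner = THIS_MODULE,
		.open  = example_open,
	};

	static struct miscdevice example_miscdev = {
		.minor = MISC_DYNAMIC_MINOR, /* evtchn uses a fixed EVTCHN_MINOR */
		.name  = "example",
		.fops  = &example_fops,
	};

	static int __init example_init(void)
	{
		/* creates /dev/misc/example */
		int err = misc_register(&example_miscdev);
		if (err != 0)
			printk(KERN_ALERT "Could not register /dev/misc/example\n");
		return err;
	}

	static void __exit example_exit(void)
	{
		misc_deregister(&example_miscdev);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");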
    15.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Thu Sep 22 14:01:01 2005 +0100
    15.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Thu Sep 22 14:04:14 2005 +0100
    15.3 @@ -41,232 +41,253 @@ static struct proc_dir_entry *privcmd_in
    15.4  static int privcmd_ioctl(struct inode *inode, struct file *file,
    15.5                           unsigned int cmd, unsigned long data)
    15.6  {
    15.7 -    int ret = -ENOSYS;
    15.8 +	int ret = -ENOSYS;
    15.9  
   15.10 -    switch ( cmd )
   15.11 -    {
   15.12 -    case IOCTL_PRIVCMD_HYPERCALL:
   15.13 -    {
   15.14 -        privcmd_hypercall_t hypercall;
   15.15 +	switch (cmd) {
   15.16 +	case IOCTL_PRIVCMD_HYPERCALL: {
   15.17 +		privcmd_hypercall_t hypercall;
   15.18    
   15.19 -        if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
   15.20 -            return -EFAULT;
   15.21 +		if (copy_from_user(&hypercall, (void *)data,
   15.22 +				   sizeof(hypercall)))
   15.23 +			return -EFAULT;
   15.24  
   15.25  #if defined(__i386__)
   15.26 -        __asm__ __volatile__ (
   15.27 -            "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
   15.28 -            "movl  4(%%eax),%%ebx ;"
   15.29 -            "movl  8(%%eax),%%ecx ;"
   15.30 -            "movl 12(%%eax),%%edx ;"
   15.31 -            "movl 16(%%eax),%%esi ;"
   15.32 -            "movl 20(%%eax),%%edi ;"
   15.33 -            "movl   (%%eax),%%eax ;"
   15.34 -            TRAP_INSTR "; "
   15.35 -            "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
   15.36 -            : "=a" (ret) : "0" (&hypercall) : "memory" );
   15.37 +		__asm__ __volatile__ (
   15.38 +			"pushl %%ebx; pushl %%ecx; pushl %%edx; "
   15.39 +			"pushl %%esi; pushl %%edi; "
   15.40 +			"movl  4(%%eax),%%ebx ;"
   15.41 +			"movl  8(%%eax),%%ecx ;"
   15.42 +			"movl 12(%%eax),%%edx ;"
   15.43 +			"movl 16(%%eax),%%esi ;"
   15.44 +			"movl 20(%%eax),%%edi ;"
   15.45 +			"movl   (%%eax),%%eax ;"
   15.46 +			TRAP_INSTR "; "
   15.47 +			"popl %%edi; popl %%esi; popl %%edx; "
   15.48 +			"popl %%ecx; popl %%ebx"
   15.49 +			: "=a" (ret) : "0" (&hypercall) : "memory" );
   15.50  #elif defined (__x86_64__)
   15.51 -        {
   15.52 -            long ign1, ign2, ign3;
   15.53 -            __asm__ __volatile__ (
   15.54 -                "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
   15.55 -                : "=a" (ret), "=D" (ign1), "=S" (ign2), "=d" (ign3)
   15.56 -                : "0" ((unsigned long)hypercall.op), 
   15.57 -                "1" ((unsigned long)hypercall.arg[0]), 
   15.58 -                "2" ((unsigned long)hypercall.arg[1]),
   15.59 -                "3" ((unsigned long)hypercall.arg[2]), 
   15.60 -                "g" ((unsigned long)hypercall.arg[3]),
   15.61 -                "g" ((unsigned long)hypercall.arg[4])
   15.62 -                : "r11","rcx","r8","r10","memory");
   15.63 -        }
   15.64 +		{
   15.65 +			long ign1, ign2, ign3;
   15.66 +			__asm__ __volatile__ (
   15.67 +				"movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
   15.68 +				: "=a" (ret), "=D" (ign1),
   15.69 +				  "=S" (ign2), "=d" (ign3)
   15.70 +				: "0" ((unsigned long)hypercall.op), 
   15.71 +				"1" ((unsigned long)hypercall.arg[0]), 
   15.72 +				"2" ((unsigned long)hypercall.arg[1]),
   15.73 +				"3" ((unsigned long)hypercall.arg[2]), 
   15.74 +				"g" ((unsigned long)hypercall.arg[3]),
   15.75 +				"g" ((unsigned long)hypercall.arg[4])
   15.76 +				: "r11","rcx","r8","r10","memory");
   15.77 +		}
   15.78  #elif defined (__ia64__)
   15.79 -       __asm__ __volatile__ (
   15.80 -           ";; mov r14=%2; mov r15=%3; mov r16=%4; mov r17=%5; mov r18=%6;"
   15.81 -           "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
   15.82 -           : "=r" (ret)
   15.83 -           : "r" (hypercall.op),
   15.84 -             "r" (hypercall.arg[0]),
   15.85 -             "r" (hypercall.arg[1]),
   15.86 -             "r" (hypercall.arg[2]),
   15.87 -             "r" (hypercall.arg[3]),
   15.88 -             "r" (hypercall.arg[4])
   15.89 -           : "r14","r15","r16","r17","r18","r2","r8","memory");
   15.90 +		__asm__ __volatile__ (
   15.91 +			";; mov r14=%2; mov r15=%3; "
   15.92 +			"mov r16=%4; mov r17=%5; mov r18=%6;"
   15.93 +			"mov r2=%1; break 0x1000;; mov %0=r8 ;;"
   15.94 +			: "=r" (ret)
   15.95 +			: "r" (hypercall.op),
   15.96 +			"r" (hypercall.arg[0]),
   15.97 +			"r" (hypercall.arg[1]),
   15.98 +			"r" (hypercall.arg[2]),
   15.99 +			"r" (hypercall.arg[3]),
  15.100 +			"r" (hypercall.arg[4])
  15.101 +			: "r14","r15","r16","r17","r18","r2","r8","memory");
  15.102  #endif
  15.103 -    }
  15.104 -    break;
  15.105 +	}
  15.106 +	break;
  15.107  
  15.108  #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
  15.109 -    case IOCTL_PRIVCMD_MMAP:
  15.110 -    {
  15.111 +	case IOCTL_PRIVCMD_MMAP: {
  15.112  #define PRIVCMD_MMAP_SZ 32
  15.113 -        privcmd_mmap_t mmapcmd;
  15.114 -        privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
  15.115 -        int i, rc;
  15.116 +		privcmd_mmap_t mmapcmd;
  15.117 +		privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
  15.118 +		int i, rc;
  15.119  
  15.120 -        if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
  15.121 -            return -EFAULT;
  15.122 +		if (copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)))
  15.123 +			return -EFAULT;
  15.124  
  15.125 -        p = mmapcmd.entry;
  15.126 +		p = mmapcmd.entry;
  15.127  
  15.128 -        for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
  15.129 -        {
  15.130 -            int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
  15.131 -                PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
  15.132 +		for (i = 0; i < mmapcmd.num;
  15.133 +		     i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) {
   15.134 +			int j, n = ((mmapcmd.num - i) > PRIVCMD_MMAP_SZ) ?
   15.135 +				PRIVCMD_MMAP_SZ : (mmapcmd.num - i);
  15.136  
  15.137 -
  15.138 -            if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
  15.139 -                return -EFAULT;
  15.140 +			if (copy_from_user(&msg, p,
   15.141 +					   n * sizeof(privcmd_mmap_entry_t)))
  15.142 +				return -EFAULT;
  15.143       
  15.144 -            for ( j = 0; j < n; j++ )
  15.145 -            {
  15.146 -                struct vm_area_struct *vma = 
  15.147 -                    find_vma( current->mm, msg[j].va );
  15.148 +			for (j = 0; j < n; j++) {
   15.149 +				struct vm_area_struct *vma =
   15.150 +					find_vma(current->mm, msg[j].va);
  15.151 +
  15.152 +				if (!vma)
  15.153 +					return -EINVAL;
  15.154  
  15.155 -                if ( !vma )
  15.156 -                    return -EINVAL;
  15.157 +				if (msg[j].va > PAGE_OFFSET)
  15.158 +					return -EINVAL;
  15.159  
  15.160 -                if ( msg[j].va > PAGE_OFFSET )
  15.161 -                    return -EINVAL;
  15.162 +				if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
   15.163 +				    > vma->vm_end)
  15.164 +					return -EINVAL;
  15.165  
  15.166 -                if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
  15.167 -                    return -EINVAL;
  15.168 -
  15.169 -                if ( (rc = direct_remap_pfn_range(vma,
  15.170 -                                                  msg[j].va&PAGE_MASK, 
  15.171 -                                                  msg[j].mfn, 
  15.172 -                                                  msg[j].npages<<PAGE_SHIFT, 
  15.173 -                                                  vma->vm_page_prot,
  15.174 -                                                  mmapcmd.dom)) < 0 )
  15.175 -                    return rc;
  15.176 -            }
  15.177 -        }
  15.178 -        ret = 0;
  15.179 -    }
  15.180 -    break;
  15.181 +				if ((rc = direct_remap_pfn_range(
  15.182 +					vma,
   15.183 +					msg[j].va & PAGE_MASK,
   15.184 +					msg[j].mfn,
   15.185 +					msg[j].npages << PAGE_SHIFT,
  15.186 +					vma->vm_page_prot,
  15.187 +					mmapcmd.dom)) < 0)
  15.188 +					return rc;
  15.189 +			}
  15.190 +		}
  15.191 +		ret = 0;
  15.192 +	}
  15.193 +	break;
  15.194  
  15.195 -    case IOCTL_PRIVCMD_MMAPBATCH:
  15.196 -    {
  15.197 -        mmu_update_t u;
  15.198 -        privcmd_mmapbatch_t m;
  15.199 -        struct vm_area_struct *vma = NULL;
  15.200 -        unsigned long *p, addr;
  15.201 -        unsigned long mfn, ptep;
  15.202 -        int i;
  15.203 +	case IOCTL_PRIVCMD_MMAPBATCH: {
  15.204 +		mmu_update_t u;
  15.205 +		privcmd_mmapbatch_t m;
  15.206 +		struct vm_area_struct *vma = NULL;
  15.207 +		unsigned long *p, addr;
  15.208 +		unsigned long mfn, ptep;
  15.209 +		int i;
  15.210 +
  15.211 +		if (copy_from_user(&m, (void *)data, sizeof(m))) {
  15.212 +			ret = -EFAULT;
  15.213 +			goto batch_err;
  15.214 +		}
  15.215  
  15.216 -        if ( copy_from_user(&m, (void *)data, sizeof(m)) )
  15.217 -        { ret = -EFAULT; goto batch_err; }
  15.218 -
  15.219 -        vma = find_vma( current->mm, m.addr );
   15.220 +		vma = find_vma(current->mm, m.addr);
  15.221 +		if (!vma) {
  15.222 +			ret = -EINVAL;
  15.223 +			goto batch_err;
  15.224 +		}
  15.225  
  15.226 -        if ( !vma )
  15.227 -        { ret = -EINVAL; goto batch_err; }
  15.228 +		if (m.addr > PAGE_OFFSET) {
  15.229 +			ret = -EFAULT;
  15.230 +			goto batch_err;
  15.231 +		}
  15.232  
  15.233 -        if ( m.addr > PAGE_OFFSET )
  15.234 -        { ret = -EFAULT; goto batch_err; }
  15.235 -
  15.236 -        if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
  15.237 -        { ret = -EFAULT; goto batch_err; }
   15.238 +		if ((m.addr + (m.num << PAGE_SHIFT)) > vma->vm_end) {
  15.239 +			ret = -EFAULT;
  15.240 +			goto batch_err;
  15.241 +		}
  15.242  
  15.243 -        p = m.arr;
  15.244 -        addr = m.addr;
  15.245 -        for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
  15.246 -        {
  15.247 -            if ( get_user(mfn, p) )
  15.248 -                return -EFAULT;
  15.249 +		p = m.arr;
  15.250 +		addr = m.addr;
  15.251 +		for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
  15.252 +			if (get_user(mfn, p))
  15.253 +				return -EFAULT;
  15.254  
  15.255 -            ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
  15.256 -            if (ret)
  15.257 -                goto batch_err;
  15.258 +			ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
  15.259 +			if (ret)
  15.260 +				goto batch_err;
  15.261  
  15.262 -            u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
  15.263 -            u.ptr = ptep;
  15.264 +			u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
  15.265 +			u.ptr = ptep;
  15.266  
  15.267 -            if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
  15.268 -                put_user(0xF0000000 | mfn, p);
  15.269 -        }
  15.270 +			if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
  15.271 +				put_user(0xF0000000 | mfn, p);
  15.272 +		}
  15.273  
  15.274 -        ret = 0;
  15.275 -        break;
  15.276 +		ret = 0;
  15.277 +		break;
  15.278  
  15.279 -    batch_err:
  15.280 -        printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n", 
  15.281 -               ret, vma, m.addr, m.num, m.arr,
  15.282 -               vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
  15.283 -        break;
  15.284 -    }
  15.285 -    break;
  15.286 +	batch_err:
  15.287 +		printk("batch_err ret=%d vma=%p addr=%lx "
  15.288 +		       "num=%d arr=%p %lx-%lx\n", 
  15.289 +		       ret, vma, m.addr, m.num, m.arr,
  15.290 +		       vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
  15.291 +		break;
  15.292 +	}
  15.293 +	break;
  15.294  #endif
  15.295  
  15.296 -    case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
  15.297 -    {
  15.298 -        unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
  15.299 -        pgd_t *pgd = pgd_offset_k(m2pv);
  15.300 -        pud_t *pud = pud_offset(pgd, m2pv);
  15.301 -        pmd_t *pmd = pmd_offset(pud, m2pv);
  15.302 -        unsigned long m2p_start_mfn = (*(unsigned long *)pmd) >> PAGE_SHIFT; 
  15.303 -        ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0;
  15.304 -    }
  15.305 -    break;
  15.306 +	case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN: {
  15.307 +		unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
  15.308 +		pgd_t *pgd = pgd_offset_k(m2pv);
  15.309 +		pud_t *pud = pud_offset(pgd, m2pv);
  15.310 +		pmd_t *pmd = pmd_offset(pud, m2pv);
  15.311 +		unsigned long m2p_start_mfn =
  15.312 +			(*(unsigned long *)pmd) >> PAGE_SHIFT; 
  15.313 +		ret = put_user(m2p_start_mfn, (unsigned long *)data) ?
   15.314 +			-EFAULT : 0;
  15.315 +	}
  15.316 +	break;
  15.317  
  15.318 -    case IOCTL_PRIVCMD_INITDOMAIN_STORE:
  15.319 -    {
  15.320 -        extern int do_xenbus_probe(void*);
  15.321 -        unsigned long page;
  15.322 +	case IOCTL_PRIVCMD_INITDOMAIN_STORE: {
  15.323 +		extern int do_xenbus_probe(void*);
  15.324 +		unsigned long page;
  15.325  
  15.326 -        if (xen_start_info->store_evtchn != 0) {
  15.327 -            ret = xen_start_info->store_mfn;
  15.328 -            break;
  15.329 -        }
  15.330 +		if (xen_start_info->store_evtchn != 0) {
  15.331 +			ret = xen_start_info->store_mfn;
  15.332 +			break;
  15.333 +		}
  15.334  
  15.335 -        /* Allocate page. */
  15.336 -        page = get_zeroed_page(GFP_KERNEL);
  15.337 -        if (!page) {
  15.338 -            ret = -ENOMEM;
  15.339 -            break;
  15.340 -        }
  15.341 +		/* Allocate page. */
  15.342 +		page = get_zeroed_page(GFP_KERNEL);
  15.343 +		if (!page) {
  15.344 +			ret = -ENOMEM;
  15.345 +			break;
  15.346 +		}
  15.347  
  15.348 -        /* We don't refcnt properly, so set reserved on page.
  15.349 -         * (this allocation is permanent) */
  15.350 -        SetPageReserved(virt_to_page(page));
  15.351 +		/* We don't refcnt properly, so set reserved on page.
  15.352 +		 * (this allocation is permanent) */
  15.353 +		SetPageReserved(virt_to_page(page));
  15.354  
  15.355 -        /* Initial connect. Setup channel and page. */
  15.356 -        xen_start_info->store_evtchn = data;
  15.357 -        xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >>
  15.358 -                                              PAGE_SHIFT);
  15.359 -        ret = xen_start_info->store_mfn;
  15.360 +		/* Initial connect. Setup channel and page. */
  15.361 +		xen_start_info->store_evtchn = data;
  15.362 +		xen_start_info->store_mfn =
  15.363 +			pfn_to_mfn(virt_to_phys((void *)page) >>
  15.364 +				   PAGE_SHIFT);
  15.365 +		ret = xen_start_info->store_mfn;
  15.366  
  15.367 -        /* We'll return then this will wait for daemon to answer */
  15.368 -        kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
  15.369 -    }
  15.370 -    break;
   15.371 +		/* We return now; the xenbus_probe thread waits for the daemon to answer. */
  15.372 +		kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
  15.373 +	}
  15.374 +	break;
  15.375  
  15.376 -    default:
  15.377 -        ret = -EINVAL;
  15.378 -        break;
  15.379 -    }
  15.380 -    return ret;
  15.381 +	default:
  15.382 +		ret = -EINVAL;
  15.383 +		break;
  15.384 +	}
  15.385 +
  15.386 +	return ret;
  15.387  }
  15.388  
  15.389  static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
  15.390  {
  15.391 -    /* DONTCOPY is essential for Xen as copy_page_range is broken. */
  15.392 -    vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
  15.393 +	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
  15.394 +	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
  15.395  
  15.396 -    return 0;
  15.397 +	return 0;
  15.398  }
  15.399  
  15.400  static struct file_operations privcmd_file_ops = {
  15.401 -    .ioctl = privcmd_ioctl,
  15.402 -    .mmap  = privcmd_mmap,
  15.403 +	.ioctl = privcmd_ioctl,
  15.404 +	.mmap  = privcmd_mmap,
  15.405  };
  15.406  
  15.407  
  15.408  static int __init privcmd_init(void)
  15.409  {
  15.410 -    privcmd_intf = create_xen_proc_entry("privcmd", 0400);
  15.411 -    if ( privcmd_intf != NULL )
  15.412 -        privcmd_intf->proc_fops = &privcmd_file_ops;
  15.413 +	privcmd_intf = create_xen_proc_entry("privcmd", 0400);
  15.414 +	if (privcmd_intf != NULL)
  15.415 +		privcmd_intf->proc_fops = &privcmd_file_ops;
  15.416  
  15.417 -    return 0;
  15.418 +	return 0;
  15.419  }
  15.420  
  15.421  __initcall(privcmd_init);
  15.422 +
  15.423 +/*
  15.424 + * Local variables:
  15.425 + *  c-file-style: "linux"
  15.426 + *  indent-tabs-mode: t
  15.427 + *  c-indent-level: 8
  15.428 + *  c-basic-offset: 8
  15.429 + *  tab-width: 8
  15.430 + * End:
  15.431 + */
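
The IOCTL_PRIVCMD_MMAP handler above never copies the whole user-supplied
entry array at once: it pulls PRIVCMD_MMAP_SZ (32) entries at a time into an
on-stack buffer, so an arbitrarily large request needs no kernel allocation.
A small userspace sketch of the same chunking arithmetic, with hypothetical
example_* names and memcpy() standing in for copy_from_user():

	#include <stdio.h>
	#include <string.h>

	#define CHUNK 32	/* mirrors PRIVCMD_MMAP_SZ in the hunk above */

	struct example_entry { unsigned long va, mfn, npages; };

	/* Walk 'num' entries CHUNK at a time, as the ioctl loop does. */
	static void process_entries(const struct example_entry *src, int num)
	{
		struct example_entry buf[CHUNK];
		int i;

		for (i = 0; i < num; i += CHUNK) {
			int n = (num - i) > CHUNK ? CHUNK : (num - i);

			memcpy(buf, src + i, n * sizeof(*buf));
			printf("batch at entry %d: %d entries\n", i, n);
		}
	}

	int main(void)
	{
		struct example_entry entries[70] = { { 0, 0, 0 } };

		process_entries(entries, 70);	/* batches of 32, 32 and 6 */
		return 0;
	}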
    16.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu Sep 22 14:01:01 2005 +0100
    16.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu Sep 22 14:04:14 2005 +0100
    16.3 @@ -84,3 +84,13 @@ extern int num_frontends;
    16.4  #define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE))
    16.5  
    16.6  #endif /* __TPMIF__BACKEND__COMMON_H__ */
    16.7 +
    16.8 +/*
    16.9 + * Local variables:
   16.10 + *  c-file-style: "linux"
   16.11 + *  indent-tabs-mode: t
   16.12 + *  c-indent-level: 8
   16.13 + *  c-basic-offset: 8
   16.14 + *  tab-width: 8
   16.15 + * End:
   16.16 + */
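
The MMAP_VADDR() macro carried as context above maps request slot _req to
the _req'th page after the interface's mmap_vstart. A quick userspace check
of that arithmetic; the ex_* names are hypothetical and a fixed 4 KiB page
size is assumed in place of PAGE_SIZE:

	#include <assert.h>

	#define EX_PAGE_SIZE 4096UL	/* assumed page size */

	struct ex_tpmif { unsigned long mmap_vstart; };

	/* same shape as MMAP_VADDR(t,_req) in the hunk above */
	#define EX_MMAP_VADDR(t, req) ((t)->mmap_vstart + ((req) * EX_PAGE_SIZE))

	int main(void)
	{
		struct ex_tpmif t = { 0x100000UL };

		assert(EX_MMAP_VADDR(&t, 0) == 0x100000UL);
		assert(EX_MMAP_VADDR(&t, 3) == 0x100000UL + 3 * EX_PAGE_SIZE);
		return 0;
	}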
    17.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu Sep 22 14:01:01 2005 +0100
    17.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu Sep 22 14:04:14 2005 +0100
    17.3 @@ -1075,3 +1075,13 @@ tpmback_init(void)
    17.4  }
    17.5  
    17.6  __initcall(tpmback_init);
    17.7 +
    17.8 +/*
    17.9 + * Local variables:
   17.10 + *  c-file-style: "linux"
   17.11 + *  indent-tabs-mode: t
   17.12 + *  c-indent-level: 8
   17.13 + *  c-basic-offset: 8
   17.14 + *  tab-width: 8
   17.15 + * End:
   17.16 + */
    18.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Thu Sep 22 14:01:01 2005 +0100
    18.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Thu Sep 22 14:04:14 2005 +0100
    18.3 @@ -268,3 +268,13 @@ void tpmif_xenbus_init(void)
    18.4  {
    18.5  	xenbus_register_backend(&tpmback);
    18.6  }
    18.7 +
    18.8 +/*
    18.9 + * Local variables:
   18.10 + *  c-file-style: "linux"
   18.11 + *  indent-tabs-mode: t
   18.12 + *  c-indent-level: 8
   18.13 + *  c-basic-offset: 8
   18.14 + *  tab-width: 8
   18.15 + * End:
   18.16 + */
    19.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu Sep 22 14:01:01 2005 +0100
    19.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu Sep 22 14:04:14 2005 +0100
    19.3 @@ -741,3 +741,13 @@ tpmif_init(void)
    19.4  }
    19.5  
    19.6  __initcall(tpmif_init);
    19.7 +
    19.8 +/*
    19.9 + * Local variables:
   19.10 + *  c-file-style: "linux"
   19.11 + *  indent-tabs-mode: t
   19.12 + *  c-indent-level: 8
   19.13 + *  c-basic-offset: 8
   19.14 + *  tab-width: 8
   19.15 + * End:
   19.16 + */
    20.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu Sep 22 14:01:01 2005 +0100
    20.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu Sep 22 14:04:14 2005 +0100
    20.3 @@ -38,3 +38,13 @@ struct tx_buffer
    20.4  };
    20.5  
    20.6  #endif
    20.7 +
    20.8 +/*
    20.9 + * Local variables:
   20.10 + *  c-file-style: "linux"
   20.11 + *  indent-tabs-mode: t
   20.12 + *  c-indent-level: 8
   20.13 + *  c-basic-offset: 8
   20.14 + *  tab-width: 8
   20.15 + * End:
   20.16 + */
    21.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/control.c	Thu Sep 22 14:01:01 2005 +0100
    21.2 +++ b/linux-2.6-xen-sparse/drivers/xen/usbback/control.c	Thu Sep 22 14:04:14 2005 +0100
    21.3 @@ -59,3 +59,13 @@ void usbif_ctrlif_init(void)
    21.4      memcpy(cmsg.msg, &st, sizeof(st));
    21.5      ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
    21.6  }
    21.7 +
    21.8 +/*
    21.9 + * Local variables:
   21.10 + *  c-file-style: "linux"
   21.11 + *  indent-tabs-mode: t
   21.12 + *  c-indent-level: 8
   21.13 + *  c-basic-offset: 8
   21.14 + *  tab-width: 8
   21.15 + * End:
   21.16 + */
    22.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Thu Sep 22 14:01:01 2005 +0100
    22.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Thu Sep 22 14:04:14 2005 +0100
    22.3 @@ -231,3 +231,13 @@ void xb_suspend_comms(void)
    22.4  
    22.5  	unbind_evtchn_from_irqhandler(xen_start_info->store_evtchn, &xb_waitq);
    22.6  }
    22.7 +
    22.8 +/*
    22.9 + * Local variables:
   22.10 + *  c-file-style: "linux"
   22.11 + *  indent-tabs-mode: t
   22.12 + *  c-indent-level: 8
   22.13 + *  c-basic-offset: 8
   22.14 + *  tab-width: 8
   22.15 + * End:
   22.16 + */
    23.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h	Thu Sep 22 14:01:01 2005 +0100
    23.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h	Thu Sep 22 14:04:14 2005 +0100
    23.3 @@ -39,3 +39,13 @@ int xs_input_avail(void);
    23.4  extern wait_queue_head_t xb_waitq;
    23.5  
    23.6  #endif /* _XENBUS_COMMS_H */
    23.7 +
    23.8 +/*
    23.9 + * Local variables:
   23.10 + *  c-file-style: "linux"
   23.11 + *  indent-tabs-mode: t
   23.12 + *  c-indent-level: 8
   23.13 + *  c-basic-offset: 8
   23.14 + *  tab-width: 8
   23.15 + * End:
   23.16 + */
    24.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Thu Sep 22 14:01:01 2005 +0100
    24.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Thu Sep 22 14:04:14 2005 +0100
    24.3 @@ -186,3 +186,13 @@ xenbus_dev_init(void)
    24.4  }
    24.5  
    24.6  __initcall(xenbus_dev_init);
    24.7 +
    24.8 +/*
    24.9 + * Local variables:
   24.10 + *  c-file-style: "linux"
   24.11 + *  indent-tabs-mode: t
   24.12 + *  c-indent-level: 8
   24.13 + *  c-basic-offset: 8
   24.14 + *  tab-width: 8
   24.15 + * End:
   24.16 + */
    25.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Thu Sep 22 14:01:01 2005 +0100
    25.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Thu Sep 22 14:04:14 2005 +0100
    25.3 @@ -687,3 +687,13 @@ static int __init xenbus_probe_init(void
    25.4  }
    25.5  
    25.6  postcore_initcall(xenbus_probe_init);
    25.7 +
    25.8 +/*
    25.9 + * Local variables:
   25.10 + *  c-file-style: "linux"
   25.11 + *  indent-tabs-mode: t
   25.12 + *  c-indent-level: 8
   25.13 + *  c-basic-offset: 8
   25.14 + *  tab-width: 8
   25.15 + * End:
   25.16 + */
    26.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Thu Sep 22 14:01:01 2005 +0100
    26.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Thu Sep 22 14:04:14 2005 +0100
    26.3 @@ -566,3 +566,13 @@ int xs_init(void)
    26.4  		return PTR_ERR(watcher);
    26.5  	return 0;
    26.6  }
    26.7 +
    26.8 +/*
    26.9 + * Local variables:
   26.10 + *  c-file-style: "linux"
   26.11 + *  indent-tabs-mode: t
   26.12 + *  c-indent-level: 8
   26.13 + *  c-basic-offset: 8
   26.14 + *  tab-width: 8
   26.15 + * End:
   26.16 + */