ia64/xen-unstable

changeset 7017:d7c794130ac5

Indentation cleanups in Linux driver code.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Sep 22 14:04:14 2005 +0100 (2005-09-22)
parents 4cff74aa6246
children ecc77b1c8612
files	linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
	linux-2.6-xen-sparse/drivers/xen/blkback/common.h
	linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
	linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c
	linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c
	linux-2.6-xen-sparse/drivers/xen/blkfront/block.h
	linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c
	linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
	linux-2.6-xen-sparse/drivers/xen/blktap/common.h
	linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c
	linux-2.6-xen-sparse/drivers/xen/console/console.c
	linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c
	linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h
	linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c
	linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c
	linux-2.6-xen-sparse/drivers/xen/tpmback/common.h
	linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c
	linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c
	linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c
	linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h
	linux-2.6-xen-sparse/drivers/xen/usbback/control.c
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c
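
The patch is purely mechanical. As a minimal before/after sketch of the conversion it applies (the function below is hypothetical, not taken from the patch): four-space indents become hard tabs, the old "if ( x )" spacing becomes "if (x)", and braces move to the K&R placement of the Linux kernel style that the added Emacs local-variables footers (c-file-style: "linux", tab-width 8) declare for each file.

/* Before: old xen-sparse style (4-space indents, spaced parens) */
static void example_put(request_queue_t *q)
{
    if ( q != NULL )
    {
        blk_put_queue(q);
    }
}

/* After: Linux kernel style (tab indents, no braces on a single statement) */
static void example_put(request_queue_t *q)
{
	if (q != NULL)
		blk_put_queue(q);
}
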
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Sep 22 14:01:01 2005 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Sep 22 14:04:14 2005 +0100
     1.3 @@ -28,12 +28,12 @@
     1.4  #define BATCH_PER_DOMAIN 16
     1.5  
     1.6  static unsigned long mmap_vstart;
     1.7 -#define MMAP_PAGES                                              \
     1.8 -    (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
     1.9 -#define MMAP_VADDR(_req,_seg)                                   \
    1.10 -    (mmap_vstart +                                              \
    1.11 -     ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +    \
    1.12 -     ((_seg) * PAGE_SIZE))
    1.13 +#define MMAP_PAGES						\
    1.14 +	(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
    1.15 +#define MMAP_VADDR(_req,_seg)						\
    1.16 +	(mmap_vstart +							\
    1.17 +	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
    1.18 +	 ((_seg) * PAGE_SIZE))
    1.19  
    1.20  /*
    1.21   * Each outstanding request that we've passed to the lower device layers has a 
    1.22 @@ -42,12 +42,12 @@ static unsigned long mmap_vstart;
    1.23   * response queued for it, with the saved 'id' passed back.
    1.24   */
    1.25  typedef struct {
    1.26 -    blkif_t       *blkif;
    1.27 -    unsigned long  id;
    1.28 -    int            nr_pages;
    1.29 -    atomic_t       pendcnt;
    1.30 -    unsigned short operation;
    1.31 -    int            status;
    1.32 +	blkif_t       *blkif;
    1.33 +	unsigned long  id;
    1.34 +	int            nr_pages;
    1.35 +	atomic_t       pendcnt;
    1.36 +	unsigned short operation;
    1.37 +	int            status;
    1.38  } pending_req_t;
    1.39  
    1.40  /*
    1.41 @@ -68,14 +68,13 @@ static PEND_RING_IDX pending_prod, pendi
    1.42  static request_queue_t *plugged_queue;
    1.43  static inline void flush_plugged_queue(void)
    1.44  {
    1.45 -    request_queue_t *q = plugged_queue;
    1.46 -    if ( q != NULL )
    1.47 -    {
    1.48 -        if ( q->unplug_fn != NULL )
    1.49 -            q->unplug_fn(q);
    1.50 -        blk_put_queue(q);
    1.51 -        plugged_queue = NULL;
    1.52 -    }
    1.53 +	request_queue_t *q = plugged_queue;
    1.54 +	if (q != NULL) {
    1.55 +		if ( q->unplug_fn != NULL )
    1.56 +			q->unplug_fn(q);
    1.57 +		blk_put_queue(q);
    1.58 +		plugged_queue = NULL;
    1.59 +	}
    1.60  }
    1.61  
    1.62  /* When using grant tables to map a frame for device access then the
    1.63 @@ -106,24 +105,23 @@ static void make_response(blkif_t *blkif
    1.64  
    1.65  static void fast_flush_area(int idx, int nr_pages)
    1.66  {
    1.67 -    struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    1.68 -    unsigned int i, invcount = 0;
    1.69 -    u16 handle;
    1.70 +	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    1.71 +	unsigned int i, invcount = 0;
    1.72 +	u16 handle;
    1.73  
    1.74 -    for ( i = 0; i < nr_pages; i++ )
    1.75 -    {
    1.76 -        if ( BLKBACK_INVALID_HANDLE != ( handle = pending_handle(idx, i) ) )
    1.77 -        {
    1.78 -            unmap[i].host_addr      = MMAP_VADDR(idx, i);
    1.79 -            unmap[i].dev_bus_addr   = 0;
    1.80 -            unmap[i].handle         = handle;
    1.81 -            pending_handle(idx, i)  = BLKBACK_INVALID_HANDLE;
    1.82 -            invcount++;
    1.83 -        }
    1.84 -    }
    1.85 -    if ( unlikely(HYPERVISOR_grant_table_op(
    1.86 -                    GNTTABOP_unmap_grant_ref, unmap, invcount)))
    1.87 -        BUG();
    1.88 +	for (i = 0; i < nr_pages; i++) {
    1.89 +		handle = pending_handle(idx, i);
    1.90 +		if (handle == BLKBACK_INVALID_HANDLE)
    1.91 +			continue;
    1.92 +		unmap[i].host_addr      = MMAP_VADDR(idx, i);
    1.93 +		unmap[i].dev_bus_addr   = 0;
    1.94 +		unmap[i].handle         = handle;
    1.95 +		pending_handle(idx, i)  = BLKBACK_INVALID_HANDLE;
    1.96 +		invcount++;
    1.97 +	}
    1.98 +
    1.99 +	BUG_ON(HYPERVISOR_grant_table_op(
   1.100 +		GNTTABOP_unmap_grant_ref, unmap, invcount));
   1.101  }
   1.102  
   1.103  
   1.104 @@ -136,34 +134,38 @@ static spinlock_t blkio_schedule_list_lo
   1.105  
   1.106  static int __on_blkdev_list(blkif_t *blkif)
   1.107  {
   1.108 -    return blkif->blkdev_list.next != NULL;
   1.109 +	return blkif->blkdev_list.next != NULL;
   1.110  }
   1.111  
   1.112  static void remove_from_blkdev_list(blkif_t *blkif)
   1.113  {
   1.114 -    unsigned long flags;
   1.115 -    if ( !__on_blkdev_list(blkif) ) return;
   1.116 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   1.117 -    if ( __on_blkdev_list(blkif) )
   1.118 -    {
   1.119 -        list_del(&blkif->blkdev_list);
   1.120 -        blkif->blkdev_list.next = NULL;
   1.121 -        blkif_put(blkif);
   1.122 -    }
   1.123 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   1.124 +	unsigned long flags;
   1.125 +
   1.126 +	if (!__on_blkdev_list(blkif))
   1.127 +		return;
   1.128 +
   1.129 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   1.130 +	if (__on_blkdev_list(blkif)) {
   1.131 +		list_del(&blkif->blkdev_list);
   1.132 +		blkif->blkdev_list.next = NULL;
   1.133 +		blkif_put(blkif);
   1.134 +	}
   1.135 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   1.136  }
   1.137  
   1.138  static void add_to_blkdev_list_tail(blkif_t *blkif)
   1.139  {
   1.140 -    unsigned long flags;
   1.141 -    if ( __on_blkdev_list(blkif) ) return;
   1.142 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   1.143 -    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
   1.144 -    {
   1.145 -        list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
   1.146 -        blkif_get(blkif);
   1.147 -    }
   1.148 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   1.149 +	unsigned long flags;
   1.150 +
   1.151 +	if (__on_blkdev_list(blkif))
   1.152 +		return;
   1.153 +
   1.154 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   1.155 +	if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
   1.156 +		list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
   1.157 +		blkif_get(blkif);
   1.158 +	}
   1.159 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   1.160  }
   1.161  
   1.162  
   1.163 @@ -175,54 +177,53 @@ static DECLARE_WAIT_QUEUE_HEAD(blkio_sch
   1.164  
   1.165  static int blkio_schedule(void *arg)
   1.166  {
   1.167 -    DECLARE_WAITQUEUE(wq, current);
   1.168 -
   1.169 -    blkif_t          *blkif;
   1.170 -    struct list_head *ent;
   1.171 -
   1.172 -    daemonize("xenblkd");
   1.173 +	DECLARE_WAITQUEUE(wq, current);
   1.174  
   1.175 -    for ( ; ; )
   1.176 -    {
   1.177 -        /* Wait for work to do. */
   1.178 -        add_wait_queue(&blkio_schedule_wait, &wq);
   1.179 -        set_current_state(TASK_INTERRUPTIBLE);
   1.180 -        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
   1.181 -             list_empty(&blkio_schedule_list) )
   1.182 -            schedule();
   1.183 -        __set_current_state(TASK_RUNNING);
   1.184 -        remove_wait_queue(&blkio_schedule_wait, &wq);
   1.185 +	blkif_t          *blkif;
   1.186 +	struct list_head *ent;
   1.187  
   1.188 -        /* Queue up a batch of requests. */
   1.189 -        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
   1.190 -                !list_empty(&blkio_schedule_list) )
   1.191 -        {
   1.192 -            ent = blkio_schedule_list.next;
   1.193 -            blkif = list_entry(ent, blkif_t, blkdev_list);
   1.194 -            blkif_get(blkif);
   1.195 -            remove_from_blkdev_list(blkif);
   1.196 -            if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
   1.197 -                add_to_blkdev_list_tail(blkif);
   1.198 -            blkif_put(blkif);
   1.199 -        }
   1.200 +	daemonize("xenblkd");
   1.201  
   1.202 -        /* Push the batch through to disc. */
   1.203 -        flush_plugged_queue();
   1.204 -    }
   1.205 +	for (;;) {
   1.206 +		/* Wait for work to do. */
   1.207 +		add_wait_queue(&blkio_schedule_wait, &wq);
   1.208 +		set_current_state(TASK_INTERRUPTIBLE);
   1.209 +		if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
   1.210 +		     list_empty(&blkio_schedule_list) )
   1.211 +			schedule();
   1.212 +		__set_current_state(TASK_RUNNING);
   1.213 +		remove_wait_queue(&blkio_schedule_wait, &wq);
   1.214 +
   1.215 +		/* Queue up a batch of requests. */
   1.216 +		while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
   1.217 +		       !list_empty(&blkio_schedule_list)) {
   1.218 +			ent = blkio_schedule_list.next;
   1.219 +			blkif = list_entry(ent, blkif_t, blkdev_list);
   1.220 +			blkif_get(blkif);
   1.221 +			remove_from_blkdev_list(blkif);
   1.222 +			if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
   1.223 +				add_to_blkdev_list_tail(blkif);
   1.224 +			blkif_put(blkif);
   1.225 +		}
   1.226 +
   1.227 +		/* Push the batch through to disc. */
   1.228 +		flush_plugged_queue();
   1.229 +	}
   1.230  }
   1.231  
   1.232  static void maybe_trigger_blkio_schedule(void)
   1.233  {
   1.234 -    /*
   1.235 -     * Needed so that two processes, who together make the following predicate
   1.236 -     * true, don't both read stale values and evaluate the predicate
   1.237 -     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
   1.238 -     */
   1.239 -    smp_mb();
   1.240 +	/*
   1.241 +	 * Needed so that two processes, which together make the following
   1.242 +	 * predicate true, don't both read stale values and evaluate the
   1.243 +	 * predicate incorrectly. Incredibly unlikely to stall the scheduler
   1.244 +	 * on x86, but...
   1.245 +	 */
   1.246 +	smp_mb();
   1.247  
   1.248 -    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   1.249 -         !list_empty(&blkio_schedule_list) )
   1.250 -        wake_up(&blkio_schedule_wait);
   1.251 +	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   1.252 +	    !list_empty(&blkio_schedule_list))
   1.253 +		wake_up(&blkio_schedule_wait);
   1.254  }
   1.255  
   1.256  
   1.257 @@ -233,36 +234,34 @@ static void maybe_trigger_blkio_schedule
   1.258  
   1.259  static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
   1.260  {
   1.261 -    unsigned long flags;
   1.262 -
   1.263 -    /* An error fails the entire request. */
   1.264 -    if ( !uptodate )
   1.265 -    {
   1.266 -        DPRINTK("Buffer not up-to-date at end of operation\n");
   1.267 -        pending_req->status = BLKIF_RSP_ERROR;
   1.268 -    }
   1.269 +	unsigned long flags;
   1.270  
   1.271 -    if ( atomic_dec_and_test(&pending_req->pendcnt) )
   1.272 -    {
   1.273 -        int pending_idx = pending_req - pending_reqs;
   1.274 -        fast_flush_area(pending_idx, pending_req->nr_pages);
   1.275 -        make_response(pending_req->blkif, pending_req->id,
   1.276 -                      pending_req->operation, pending_req->status);
   1.277 -        blkif_put(pending_req->blkif);
   1.278 -        spin_lock_irqsave(&pend_prod_lock, flags);
   1.279 -        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   1.280 -        spin_unlock_irqrestore(&pend_prod_lock, flags);
   1.281 -        maybe_trigger_blkio_schedule();
   1.282 -    }
   1.283 +	/* An error fails the entire request. */
   1.284 +	if (!uptodate) {
   1.285 +		DPRINTK("Buffer not up-to-date at end of operation\n");
   1.286 +		pending_req->status = BLKIF_RSP_ERROR;
   1.287 +	}
   1.288 +
   1.289 +	if (atomic_dec_and_test(&pending_req->pendcnt)) {
   1.290 +		int pending_idx = pending_req - pending_reqs;
   1.291 +		fast_flush_area(pending_idx, pending_req->nr_pages);
   1.292 +		make_response(pending_req->blkif, pending_req->id,
   1.293 +			      pending_req->operation, pending_req->status);
   1.294 +		blkif_put(pending_req->blkif);
   1.295 +		spin_lock_irqsave(&pend_prod_lock, flags);
   1.296 +		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   1.297 +		spin_unlock_irqrestore(&pend_prod_lock, flags);
   1.298 +		maybe_trigger_blkio_schedule();
   1.299 +	}
   1.300  }
   1.301  
   1.302  static int end_block_io_op(struct bio *bio, unsigned int done, int error)
   1.303  {
   1.304 -    if ( bio->bi_size != 0 )
   1.305 -        return 1;
   1.306 -    __end_block_io_op(bio->bi_private, !error);
   1.307 -    bio_put(bio);
   1.308 -    return error;
   1.309 +	if (bio->bi_size != 0)
   1.310 +		return 1;
   1.311 +	__end_block_io_op(bio->bi_private, !error);
   1.312 +	bio_put(bio);
   1.313 +	return error;
   1.314  }
   1.315  
   1.316  
   1.317 @@ -272,10 +271,10 @@ static int end_block_io_op(struct bio *b
   1.318  
   1.319  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
   1.320  {
   1.321 -    blkif_t *blkif = dev_id;
   1.322 -    add_to_blkdev_list_tail(blkif);
   1.323 -    maybe_trigger_blkio_schedule();
   1.324 -    return IRQ_HANDLED;
   1.325 +	blkif_t *blkif = dev_id;
   1.326 +	add_to_blkdev_list_tail(blkif);
   1.327 +	maybe_trigger_blkio_schedule();
   1.328 +	return IRQ_HANDLED;
   1.329  }
   1.330  
   1.331  
   1.332 @@ -286,183 +285,174 @@ irqreturn_t blkif_be_int(int irq, void *
   1.333  
   1.334  static int do_block_io_op(blkif_t *blkif, int max_to_do)
   1.335  {
   1.336 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   1.337 -    blkif_request_t *req;
   1.338 -    RING_IDX i, rp;
   1.339 -    int more_to_do = 0;
   1.340 -
   1.341 -    rp = blk_ring->sring->req_prod;
   1.342 -    rmb(); /* Ensure we see queued requests up to 'rp'. */
   1.343 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   1.344 +	blkif_request_t *req;
   1.345 +	RING_IDX i, rp;
   1.346 +	int more_to_do = 0;
   1.347  
   1.348 -    for ( i = blk_ring->req_cons; 
   1.349 -         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
   1.350 -          i++ )
   1.351 -    {
   1.352 -        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
   1.353 -        {
   1.354 -            more_to_do = 1;
   1.355 -            break;
   1.356 -        }
   1.357 +	rp = blk_ring->sring->req_prod;
   1.358 +	rmb(); /* Ensure we see queued requests up to 'rp'. */
   1.359 +
   1.360 +	for (i = blk_ring->req_cons; 
   1.361 +	     (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
   1.362 +	     i++) {
   1.363 +		if ((max_to_do-- == 0) ||
   1.364 +		    (NR_PENDING_REQS == MAX_PENDING_REQS)) {
   1.365 +			more_to_do = 1;
   1.366 +			break;
   1.367 +		}
   1.368          
   1.369 -        req = RING_GET_REQUEST(blk_ring, i);
   1.370 -        switch ( req->operation )
   1.371 -        {
   1.372 -        case BLKIF_OP_READ:
   1.373 -        case BLKIF_OP_WRITE:
   1.374 -            dispatch_rw_block_io(blkif, req);
   1.375 -            break;
   1.376 +		req = RING_GET_REQUEST(blk_ring, i);
   1.377 +		switch (req->operation) {
   1.378 +		case BLKIF_OP_READ:
   1.379 +		case BLKIF_OP_WRITE:
   1.380 +			dispatch_rw_block_io(blkif, req);
   1.381 +			break;
   1.382  
   1.383 -        default:
   1.384 -            DPRINTK("error: unknown block io operation [%d]\n",
   1.385 -                    req->operation);
   1.386 -            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   1.387 -            break;
   1.388 -        }
   1.389 -    }
   1.390 +		default:
   1.391 +			DPRINTK("error: unknown block io operation [%d]\n",
   1.392 +				req->operation);
   1.393 +			make_response(blkif, req->id, req->operation,
   1.394 +				      BLKIF_RSP_ERROR);
   1.395 +			break;
   1.396 +		}
   1.397 +	}
   1.398  
   1.399 -    blk_ring->req_cons = i;
   1.400 -    return more_to_do;
   1.401 +	blk_ring->req_cons = i;
   1.402 +	return more_to_do;
   1.403  }
   1.404  
   1.405  static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
   1.406  {
   1.407 -    extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
   1.408 -    int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
   1.409 -    unsigned long fas = 0;
   1.410 -    int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   1.411 -    pending_req_t *pending_req;
   1.412 -    struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.413 -    struct phys_req preq;
   1.414 -    struct { 
   1.415 -        unsigned long buf; unsigned int nsec;
   1.416 -    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.417 -    unsigned int nseg;
   1.418 -    struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.419 -    int nbio = 0;
   1.420 -    request_queue_t *q;
   1.421 -
   1.422 -    /* Check that number of segments is sane. */
   1.423 -    nseg = req->nr_segments;
   1.424 -    if ( unlikely(nseg == 0) || 
   1.425 -         unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
   1.426 -    {
   1.427 -        DPRINTK("Bad number of segments in request (%d)\n", nseg);
   1.428 -        goto bad_descriptor;
   1.429 -    }
   1.430 -
   1.431 -    preq.dev           = req->handle;
   1.432 -    preq.sector_number = req->sector_number;
   1.433 -    preq.nr_sects      = 0;
   1.434 -
   1.435 -    for ( i = 0; i < nseg; i++ )
   1.436 -    {
   1.437 -        fas         = req->frame_and_sects[i];
   1.438 -        seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
   1.439 -
   1.440 -        if ( seg[i].nsec <= 0 )
   1.441 -            goto bad_descriptor;
   1.442 -        preq.nr_sects += seg[i].nsec;
   1.443 -
   1.444 -        map[i].host_addr = MMAP_VADDR(pending_idx, i);
   1.445 -        map[i].dom = blkif->domid;
   1.446 -        map[i].ref = blkif_gref_from_fas(fas);
   1.447 -        map[i].flags = GNTMAP_host_map;
   1.448 -        if ( operation == WRITE )
   1.449 -            map[i].flags |= GNTMAP_readonly;
   1.450 -    }
   1.451 -
   1.452 -    if ( unlikely(HYPERVISOR_grant_table_op(
   1.453 -                    GNTTABOP_map_grant_ref, map, nseg)))
   1.454 -        BUG();
   1.455 -
   1.456 -    for ( i = 0; i < nseg; i++ )
   1.457 -    {
   1.458 -        if ( unlikely(map[i].handle < 0) )
   1.459 -        {
   1.460 -            DPRINTK("invalid buffer -- could not remap it\n");
   1.461 -            fast_flush_area(pending_idx, nseg);
   1.462 -            goto bad_descriptor;
   1.463 -        }
   1.464 -
   1.465 -        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
   1.466 -            FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
   1.467 -
   1.468 -        pending_handle(pending_idx, i) = map[i].handle;
   1.469 -    }
   1.470 +	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
   1.471 +	int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
   1.472 +	unsigned long fas = 0;
   1.473 +	int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   1.474 +	pending_req_t *pending_req;
   1.475 +	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.476 +	struct phys_req preq;
   1.477 +	struct { 
   1.478 +		unsigned long buf; unsigned int nsec;
   1.479 +	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.480 +	unsigned int nseg;
   1.481 +	struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   1.482 +	int nbio = 0;
   1.483 +	request_queue_t *q;
   1.484  
   1.485 -    for ( i = 0; i < nseg; i++ )
   1.486 -    {
   1.487 -        fas         = req->frame_and_sects[i];
   1.488 -        seg[i].buf  = map[i].dev_bus_addr | (blkif_first_sect(fas) << 9);
   1.489 -    }
   1.490 -
   1.491 -    if ( vbd_translate(&preq, blkif, operation) != 0 )
   1.492 -    {
   1.493 -        DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
   1.494 -                operation == READ ? "read" : "write", preq.sector_number,
   1.495 -                preq.sector_number + preq.nr_sects, preq.dev); 
   1.496 -        goto bad_descriptor;
   1.497 -    }
   1.498 -
   1.499 -    pending_req = &pending_reqs[pending_idx];
   1.500 -    pending_req->blkif     = blkif;
   1.501 -    pending_req->id        = req->id;
   1.502 -    pending_req->operation = operation;
   1.503 -    pending_req->status    = BLKIF_RSP_OKAY;
   1.504 -    pending_req->nr_pages  = nseg;
   1.505 -
   1.506 -    for ( i = 0; i < nseg; i++ )
   1.507 -    {
   1.508 -        if ( ((int)preq.sector_number|(int)seg[i].nsec) &
   1.509 -             ((bdev_hardsect_size(preq.bdev) >> 9) - 1) )
   1.510 -        {
   1.511 -            DPRINTK("Misaligned I/O request from domain %d", blkif->domid);
   1.512 -            goto cleanup_and_fail;
   1.513 -        }
   1.514 +	/* Check that number of segments is sane. */
   1.515 +	nseg = req->nr_segments;
   1.516 +	if (unlikely(nseg == 0) || 
   1.517 +	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
   1.518 +		DPRINTK("Bad number of segments in request (%d)\n", nseg);
   1.519 +		goto bad_descriptor;
   1.520 +	}
   1.521  
   1.522 -        while ( (bio == NULL) ||
   1.523 -                (bio_add_page(bio,
   1.524 -                              virt_to_page(MMAP_VADDR(pending_idx, i)),
   1.525 -                              seg[i].nsec << 9,
   1.526 -                              seg[i].buf & ~PAGE_MASK) == 0) )
   1.527 -        {
   1.528 -            bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
   1.529 -            if ( unlikely(bio == NULL) )
   1.530 -            {
   1.531 -            cleanup_and_fail:
   1.532 -                for ( i = 0; i < (nbio-1); i++ )
   1.533 -                    bio_put(biolist[i]);
   1.534 -                fast_flush_area(pending_idx, nseg);
   1.535 -                goto bad_descriptor;
   1.536 -            }
   1.537 +	preq.dev           = req->handle;
   1.538 +	preq.sector_number = req->sector_number;
   1.539 +	preq.nr_sects      = 0;
   1.540 +
   1.541 +	for (i = 0; i < nseg; i++) {
   1.542 +		fas         = req->frame_and_sects[i];
   1.543 +		seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
   1.544 +
   1.545 +		if (seg[i].nsec <= 0)
   1.546 +			goto bad_descriptor;
   1.547 +		preq.nr_sects += seg[i].nsec;
   1.548 +
   1.549 +		map[i].host_addr = MMAP_VADDR(pending_idx, i);
   1.550 +		map[i].dom = blkif->domid;
   1.551 +		map[i].ref = blkif_gref_from_fas(fas);
   1.552 +		map[i].flags = GNTMAP_host_map;
   1.553 +		if ( operation == WRITE )
   1.554 +			map[i].flags |= GNTMAP_readonly;
   1.555 +	}
   1.556 +
   1.557 +	BUG_ON(HYPERVISOR_grant_table_op(
   1.558 +		GNTTABOP_map_grant_ref, map, nseg));
   1.559 +
   1.560 +	for (i = 0; i < nseg; i++) {
   1.561 +		if (unlikely(map[i].handle < 0)) {
   1.562 +			DPRINTK("invalid buffer -- could not remap it\n");
   1.563 +			fast_flush_area(pending_idx, nseg);
   1.564 +			goto bad_descriptor;
   1.565 +		}
   1.566 +
   1.567 +		phys_to_machine_mapping[__pa(MMAP_VADDR(
   1.568 +			pending_idx, i)) >> PAGE_SHIFT] =
   1.569 +			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
   1.570 +
   1.571 +		pending_handle(pending_idx, i) = map[i].handle;
   1.572 +	}
   1.573 +
   1.574 +	for (i = 0; i < nseg; i++) {
   1.575 +		fas         = req->frame_and_sects[i];
   1.576 +		seg[i].buf  = map[i].dev_bus_addr | 
   1.577 +			(blkif_first_sect(fas) << 9);
   1.578 +	}
   1.579 +
   1.580 +	if (vbd_translate(&preq, blkif, operation) != 0) {
   1.581 +		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
   1.582 +			operation == READ ? "read" : "write",
   1.583 +			preq.sector_number,
   1.584 +			preq.sector_number + preq.nr_sects, preq.dev); 
   1.585 +		goto bad_descriptor;
   1.586 +	}
   1.587 +
   1.588 +	pending_req = &pending_reqs[pending_idx];
   1.589 +	pending_req->blkif     = blkif;
   1.590 +	pending_req->id        = req->id;
   1.591 +	pending_req->operation = operation;
   1.592 +	pending_req->status    = BLKIF_RSP_OKAY;
   1.593 +	pending_req->nr_pages  = nseg;
   1.594 +
   1.595 +	for (i = 0; i < nseg; i++) {
   1.596 +		if (((int)preq.sector_number|(int)seg[i].nsec) &
   1.597 +		    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
   1.598 +			DPRINTK("Misaligned I/O request from domain %d",
   1.599 +				blkif->domid);
   1.600 +			goto cleanup_and_fail;
   1.601 +		}
   1.602 +
   1.603 +		while ((bio == NULL) ||
   1.604 +		       (bio_add_page(bio,
   1.605 +				     virt_to_page(MMAP_VADDR(pending_idx, i)),
   1.606 +				     seg[i].nsec << 9,
   1.607 +				     seg[i].buf & ~PAGE_MASK) == 0)) {
   1.608 +			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
   1.609 +			if (unlikely(bio == NULL)) {
   1.610 +			cleanup_and_fail:
   1.611 +				for (i = 0; i < (nbio-1); i++)
   1.612 +					bio_put(biolist[i]);
   1.613 +				fast_flush_area(pending_idx, nseg);
   1.614 +				goto bad_descriptor;
   1.615 +			}
   1.616                  
   1.617 -            bio->bi_bdev    = preq.bdev;
   1.618 -            bio->bi_private = pending_req;
   1.619 -            bio->bi_end_io  = end_block_io_op;
   1.620 -            bio->bi_sector  = preq.sector_number;
   1.621 -        }
   1.622 -
   1.623 -        preq.sector_number += seg[i].nsec;
   1.624 -    }
   1.625 +			bio->bi_bdev    = preq.bdev;
   1.626 +			bio->bi_private = pending_req;
   1.627 +			bio->bi_end_io  = end_block_io_op;
   1.628 +			bio->bi_sector  = preq.sector_number;
   1.629 +		}
   1.630  
   1.631 -    if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
   1.632 -    {
   1.633 -        flush_plugged_queue();
   1.634 -        blk_get_queue(q);
   1.635 -        plugged_queue = q;
   1.636 -    }
   1.637 +		preq.sector_number += seg[i].nsec;
   1.638 +	}
   1.639  
   1.640 -    atomic_set(&pending_req->pendcnt, nbio);
   1.641 -    pending_cons++;
   1.642 -    blkif_get(blkif);
   1.643 +	if ((q = bdev_get_queue(bio->bi_bdev)) != plugged_queue) {
   1.644 +		flush_plugged_queue();
   1.645 +		blk_get_queue(q);
   1.646 +		plugged_queue = q;
   1.647 +	}
   1.648  
   1.649 -    for ( i = 0; i < nbio; i++ )
   1.650 -        submit_bio(operation, biolist[i]);
   1.651 +	atomic_set(&pending_req->pendcnt, nbio);
   1.652 +	pending_cons++;
   1.653 +	blkif_get(blkif);
   1.654  
   1.655 -    return;
   1.656 +	for (i = 0; i < nbio; i++)
   1.657 +		submit_bio(operation, biolist[i]);
   1.658 +
   1.659 +	return;
   1.660  
   1.661   bad_descriptor:
   1.662 -    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   1.663 +	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   1.664  } 
   1.665  
   1.666  
   1.667 @@ -475,66 +465,71 @@ static void dispatch_rw_block_io(blkif_t
   1.668  static void make_response(blkif_t *blkif, unsigned long id, 
   1.669                            unsigned short op, int st)
   1.670  {
   1.671 -    blkif_response_t *resp;
   1.672 -    unsigned long     flags;
   1.673 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   1.674 +	blkif_response_t *resp;
   1.675 +	unsigned long     flags;
   1.676 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   1.677  
   1.678 -    /* Place on the response ring for the relevant domain. */ 
   1.679 -    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
   1.680 -    resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
   1.681 -    resp->id        = id;
   1.682 -    resp->operation = op;
   1.683 -    resp->status    = st;
   1.684 -    wmb(); /* Ensure other side can see the response fields. */
   1.685 -    blk_ring->rsp_prod_pvt++;
   1.686 -    RING_PUSH_RESPONSES(blk_ring);
   1.687 -    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
   1.688 +	/* Place on the response ring for the relevant domain. */ 
   1.689 +	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
   1.690 +	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
   1.691 +	resp->id        = id;
   1.692 +	resp->operation = op;
   1.693 +	resp->status    = st;
   1.694 +	wmb(); /* Ensure other side can see the response fields. */
   1.695 +	blk_ring->rsp_prod_pvt++;
   1.696 +	RING_PUSH_RESPONSES(blk_ring);
   1.697 +	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
   1.698  
   1.699 -    /* Kick the relevant domain. */
   1.700 -    notify_via_evtchn(blkif->evtchn);
   1.701 +	/* Kick the relevant domain. */
   1.702 +	notify_via_evtchn(blkif->evtchn);
   1.703  }
   1.704  
   1.705  void blkif_deschedule(blkif_t *blkif)
   1.706  {
   1.707 -    remove_from_blkdev_list(blkif);
   1.708 +	remove_from_blkdev_list(blkif);
   1.709  }
   1.710  
   1.711  static int __init blkif_init(void)
   1.712  {
   1.713 -    int i;
   1.714 -    struct page *page;
   1.715 -
   1.716 -    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
   1.717 -         !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
   1.718 -        return 0;
   1.719 -
   1.720 -    blkif_interface_init();
   1.721 -
   1.722 -    page = balloon_alloc_empty_page_range(MMAP_PAGES);
   1.723 -    BUG_ON(page == NULL);
   1.724 -    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
   1.725 +	int i;
   1.726 +	struct page *page;
   1.727  
   1.728 -    pending_cons = 0;
   1.729 -    pending_prod = MAX_PENDING_REQS;
   1.730 -    memset(pending_reqs, 0, sizeof(pending_reqs));
   1.731 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
   1.732 -        pending_ring[i] = i;
   1.733 -    
   1.734 -    spin_lock_init(&blkio_schedule_list_lock);
   1.735 -    INIT_LIST_HEAD(&blkio_schedule_list);
   1.736 +	if (!(xen_start_info->flags & SIF_INITDOMAIN) &&
   1.737 +	    !(xen_start_info->flags & SIF_BLK_BE_DOMAIN))
   1.738 +		return 0;
   1.739  
   1.740 -    if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
   1.741 -        BUG();
   1.742 -
   1.743 -    blkif_xenbus_init();
   1.744 +	blkif_interface_init();
   1.745  
   1.746 -    memset( pending_grant_handles,  BLKBACK_INVALID_HANDLE, MMAP_PAGES );
   1.747 +	page = balloon_alloc_empty_page_range(MMAP_PAGES);
   1.748 +	BUG_ON(page == NULL);
   1.749 +	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
   1.750  
   1.751 -#ifdef CONFIG_XEN_BLKDEV_TAP_BE
   1.752 -    printk(KERN_ALERT "NOTE: Blkif backend is running with tap support on!\n");
   1.753 -#endif
   1.754 +	pending_cons = 0;
   1.755 +	pending_prod = MAX_PENDING_REQS;
   1.756 +	memset(pending_reqs, 0, sizeof(pending_reqs));
   1.757 +	for (i = 0; i < MAX_PENDING_REQS; i++)
   1.758 +		pending_ring[i] = i;
   1.759 +    
   1.760 +	spin_lock_init(&blkio_schedule_list_lock);
   1.761 +	INIT_LIST_HEAD(&blkio_schedule_list);
   1.762  
   1.763 -    return 0;
   1.764 +	BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
   1.765 +
   1.766 +	blkif_xenbus_init();
   1.767 +
   1.768 +	memset(pending_grant_handles,  BLKBACK_INVALID_HANDLE, MMAP_PAGES);
   1.769 +
   1.770 +	return 0;
   1.771  }
   1.772  
   1.773  __initcall(blkif_init);
   1.774 +
   1.775 +/*
   1.776 + * Local variables:
   1.777 + *  c-file-style: "linux"
   1.778 + *  indent-tabs-mode: t
   1.779 + *  c-indent-level: 8
   1.780 + *  c-basic-offset: 8
   1.781 + *  tab-width: 8
   1.782 + * End:
   1.783 + */
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h	Thu Sep 22 14:01:01 2005 +0100
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h	Thu Sep 22 14:04:14 2005 +0100
     2.3 @@ -31,39 +31,39 @@
     2.4  #endif
     2.5  
     2.6  struct vbd {
     2.7 -    blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
     2.8 -    unsigned char  readonly;    /* Non-zero -> read-only */
     2.9 -    unsigned char  type;        /* VDISK_xxx */
    2.10 -    u32            pdevice;     /* phys device that this vbd maps to */
    2.11 -    struct block_device *bdev;
    2.12 +	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
    2.13 +	unsigned char  readonly;    /* Non-zero -> read-only */
    2.14 +	unsigned char  type;        /* VDISK_xxx */
    2.15 +	u32            pdevice;     /* phys device that this vbd maps to */
    2.16 +	struct block_device *bdev;
    2.17  }; 
    2.18  
    2.19  typedef struct blkif_st {
    2.20 -    /* Unique identifier for this interface. */
    2.21 -    domid_t           domid;
    2.22 -    unsigned int      handle;
    2.23 -    /* Physical parameters of the comms window. */
    2.24 -    unsigned int      evtchn;
    2.25 -    unsigned int      remote_evtchn;
    2.26 -    /* Comms information. */
    2.27 -    blkif_back_ring_t blk_ring;
    2.28 -    struct vm_struct *blk_ring_area;
    2.29 -    /* VBDs attached to this interface. */
    2.30 -    struct vbd        vbd;
    2.31 -    /* Private fields. */
    2.32 -    enum { DISCONNECTED, CONNECTED } status;
    2.33 +	/* Unique identifier for this interface. */
    2.34 +	domid_t           domid;
    2.35 +	unsigned int      handle;
    2.36 +	/* Physical parameters of the comms window. */
    2.37 +	unsigned int      evtchn;
    2.38 +	unsigned int      remote_evtchn;
    2.39 +	/* Comms information. */
    2.40 +	blkif_back_ring_t blk_ring;
    2.41 +	struct vm_struct *blk_ring_area;
    2.42 +	/* VBDs attached to this interface. */
    2.43 +	struct vbd        vbd;
    2.44 +	/* Private fields. */
    2.45 +	enum { DISCONNECTED, CONNECTED } status;
    2.46  #ifdef CONFIG_XEN_BLKDEV_TAP_BE
    2.47 -    /* Is this a blktap frontend */
    2.48 -    unsigned int     is_blktap;
    2.49 +	/* Is this a blktap frontend */
    2.50 +	unsigned int     is_blktap;
    2.51  #endif
    2.52 -    struct list_head blkdev_list;
    2.53 -    spinlock_t       blk_ring_lock;
    2.54 -    atomic_t         refcnt;
    2.55 +	struct list_head blkdev_list;
    2.56 +	spinlock_t       blk_ring_lock;
    2.57 +	atomic_t         refcnt;
    2.58  
    2.59 -    struct work_struct free_work;
    2.60 +	struct work_struct free_work;
    2.61  
    2.62 -    u16         shmem_handle;
    2.63 -    grant_ref_t shmem_ref;
    2.64 +	u16         shmem_handle;
    2.65 +	grant_ref_t shmem_ref;
    2.66  } blkif_t;
    2.67  
    2.68  blkif_t *alloc_blkif(domid_t domid);
    2.69 @@ -71,11 +71,11 @@ void free_blkif_callback(blkif_t *blkif)
    2.70  int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
    2.71  
    2.72  #define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
    2.73 -#define blkif_put(_b)                             \
    2.74 -    do {                                          \
    2.75 -        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
    2.76 -            free_blkif_callback(_b);		  \
    2.77 -    } while (0)
    2.78 +#define blkif_put(_b)					\
    2.79 +	do {						\
    2.80 +		if (atomic_dec_and_test(&(_b)->refcnt))	\
    2.81 +			free_blkif_callback(_b);	\
    2.82 +	} while (0)
    2.83  
    2.84  /* Create a vbd. */
    2.85  int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, u32 pdevice,
    2.86 @@ -87,10 +87,10 @@ unsigned int vbd_info(struct vbd *vbd);
    2.87  unsigned long vbd_secsize(struct vbd *vbd);
    2.88  
    2.89  struct phys_req {
    2.90 -    unsigned short       dev;
    2.91 -    unsigned short       nr_sects;
    2.92 -    struct block_device *bdev;
    2.93 -    blkif_sector_t       sector_number;
    2.94 +	unsigned short       dev;
    2.95 +	unsigned short       nr_sects;
    2.96 +	struct block_device *bdev;
    2.97 +	blkif_sector_t       sector_number;
    2.98  };
    2.99  
   2.100  int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
   2.101 @@ -104,3 +104,13 @@ void blkif_xenbus_init(void);
   2.102  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
   2.103  
   2.104  #endif /* __BLKIF__BACKEND__COMMON_H__ */
   2.105 +
   2.106 +/*
   2.107 + * Local variables:
   2.108 + *  c-file-style: "linux"
   2.109 + *  indent-tabs-mode: t
   2.110 + *  c-indent-level: 8
   2.111 + *  c-basic-offset: 8
   2.112 + *  tab-width: 8
   2.113 + * End:
   2.114 + */
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Sep 22 14:01:01 2005 +0100
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Sep 22 14:04:14 2005 +0100
     3.3 @@ -13,134 +13,144 @@ static kmem_cache_t *blkif_cachep;
     3.4  
     3.5  blkif_t *alloc_blkif(domid_t domid)
     3.6  {
     3.7 -    blkif_t *blkif;
     3.8 -
     3.9 -    blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
    3.10 -    if (!blkif)
    3.11 -	    return ERR_PTR(-ENOMEM);
    3.12 +	blkif_t *blkif;
    3.13  
    3.14 -    memset(blkif, 0, sizeof(*blkif));
    3.15 -    blkif->domid = domid;
    3.16 -    blkif->status = DISCONNECTED;
    3.17 -    spin_lock_init(&blkif->blk_ring_lock);
    3.18 -    atomic_set(&blkif->refcnt, 1);
    3.19 +	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
    3.20 +	if (!blkif)
    3.21 +		return ERR_PTR(-ENOMEM);
    3.22  
    3.23 -    return blkif;
    3.24 +	memset(blkif, 0, sizeof(*blkif));
    3.25 +	blkif->domid = domid;
    3.26 +	blkif->status = DISCONNECTED;
    3.27 +	spin_lock_init(&blkif->blk_ring_lock);
    3.28 +	atomic_set(&blkif->refcnt, 1);
    3.29 +
    3.30 +	return blkif;
    3.31  }
    3.32  
    3.33  static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
    3.34  {
    3.35 -    struct gnttab_map_grant_ref op;
    3.36 -
    3.37 -    op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
    3.38 -    op.flags     = GNTMAP_host_map;
    3.39 -    op.ref       = shared_page;
    3.40 -    op.dom       = blkif->domid;
    3.41 +	struct gnttab_map_grant_ref op;
    3.42  
    3.43 -    lock_vm_area(blkif->blk_ring_area);
    3.44 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
    3.45 -    unlock_vm_area(blkif->blk_ring_area);
    3.46 +	op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
    3.47 +	op.flags     = GNTMAP_host_map;
    3.48 +	op.ref       = shared_page;
    3.49 +	op.dom       = blkif->domid;
    3.50  
    3.51 -    if (op.handle < 0) {
    3.52 -	DPRINTK(" Grant table operation failure !\n");
    3.53 -	return op.handle;
    3.54 -    }
    3.55 +	lock_vm_area(blkif->blk_ring_area);
    3.56 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
    3.57 +	unlock_vm_area(blkif->blk_ring_area);
    3.58  
    3.59 -    blkif->shmem_ref = shared_page;
    3.60 -    blkif->shmem_handle = op.handle;
    3.61 +	if (op.handle < 0) {
    3.62 +		DPRINTK(" Grant table operation failure !\n");
    3.63 +		return op.handle;
    3.64 +	}
    3.65  
    3.66 -    return 0;
    3.67 +	blkif->shmem_ref = shared_page;
    3.68 +	blkif->shmem_handle = op.handle;
    3.69 +
    3.70 +	return 0;
    3.71  }
    3.72  
    3.73  static void unmap_frontend_page(blkif_t *blkif)
    3.74  {
    3.75 -    struct gnttab_unmap_grant_ref op;
    3.76 +	struct gnttab_unmap_grant_ref op;
    3.77  
    3.78 -    op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
    3.79 -    op.handle       = blkif->shmem_handle;
    3.80 -    op.dev_bus_addr = 0;
    3.81 +	op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
    3.82 +	op.handle       = blkif->shmem_handle;
    3.83 +	op.dev_bus_addr = 0;
    3.84  
    3.85 -    lock_vm_area(blkif->blk_ring_area);
    3.86 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
    3.87 -    unlock_vm_area(blkif->blk_ring_area);
    3.88 +	lock_vm_area(blkif->blk_ring_area);
    3.89 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
    3.90 +	unlock_vm_area(blkif->blk_ring_area);
    3.91  }
    3.92  
    3.93  int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
    3.94  {
    3.95 -    blkif_sring_t *sring;
    3.96 -    evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
    3.97 -    int err;
    3.98 -
    3.99 -    BUG_ON(blkif->remote_evtchn);
   3.100 -
   3.101 -    if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
   3.102 -	return -ENOMEM;
   3.103 -
   3.104 -    err = map_frontend_page(blkif, shared_page);
   3.105 -    if (err) {
   3.106 -        free_vm_area(blkif->blk_ring_area);
   3.107 -	return err;
   3.108 -    }
   3.109 +	blkif_sring_t *sring;
   3.110 +	evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
   3.111 +	int err;
   3.112  
   3.113 -    op.u.bind_interdomain.dom1 = DOMID_SELF;
   3.114 -    op.u.bind_interdomain.dom2 = blkif->domid;
   3.115 -    op.u.bind_interdomain.port1 = 0;
   3.116 -    op.u.bind_interdomain.port2 = evtchn;
   3.117 -    err = HYPERVISOR_event_channel_op(&op);
   3.118 -    if (err) {
   3.119 -	unmap_frontend_page(blkif);
   3.120 -        free_vm_area(blkif->blk_ring_area);
   3.121 -	return err;
   3.122 -    }
   3.123 +	BUG_ON(blkif->remote_evtchn);
   3.124  
   3.125 -    blkif->evtchn = op.u.bind_interdomain.port1;
   3.126 -    blkif->remote_evtchn = evtchn;
   3.127 +	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
   3.128 +		return -ENOMEM;
   3.129  
   3.130 -    sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
   3.131 -    SHARED_RING_INIT(sring);
   3.132 -    BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
   3.133 +	err = map_frontend_page(blkif, shared_page);
   3.134 +	if (err) {
   3.135 +		free_vm_area(blkif->blk_ring_area);
   3.136 +		return err;
   3.137 +	}
   3.138  
   3.139 -    bind_evtchn_to_irqhandler(blkif->evtchn, blkif_be_int, 0, "blkif-backend",
   3.140 -			      blkif);
   3.141 -    blkif->status = CONNECTED;
   3.142 +	op.u.bind_interdomain.dom1 = DOMID_SELF;
   3.143 +	op.u.bind_interdomain.dom2 = blkif->domid;
   3.144 +	op.u.bind_interdomain.port1 = 0;
   3.145 +	op.u.bind_interdomain.port2 = evtchn;
   3.146 +	err = HYPERVISOR_event_channel_op(&op);
   3.147 +	if (err) {
   3.148 +		unmap_frontend_page(blkif);
   3.149 +		free_vm_area(blkif->blk_ring_area);
   3.150 +		return err;
   3.151 +	}
   3.152  
   3.153 -    return 0;
   3.154 +	blkif->evtchn = op.u.bind_interdomain.port1;
   3.155 +	blkif->remote_evtchn = evtchn;
   3.156 +
   3.157 +	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
   3.158 +	SHARED_RING_INIT(sring);
   3.159 +	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
   3.160 +
   3.161 +	bind_evtchn_to_irqhandler(
   3.162 +		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
   3.163 +	blkif->status = CONNECTED;
   3.164 +
   3.165 +	return 0;
   3.166  }
   3.167  
   3.168  static void free_blkif(void *arg)
   3.169  {
   3.170 -    evtchn_op_t op = { .cmd = EVTCHNOP_close };
   3.171 -    blkif_t *blkif = (blkif_t *)arg;
   3.172 -
   3.173 -    op.u.close.port = blkif->evtchn;
   3.174 -    op.u.close.dom = DOMID_SELF;
   3.175 -    HYPERVISOR_event_channel_op(&op);
   3.176 -    op.u.close.port = blkif->remote_evtchn;
   3.177 -    op.u.close.dom = blkif->domid;
   3.178 -    HYPERVISOR_event_channel_op(&op);
   3.179 +	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   3.180 +	blkif_t *blkif = (blkif_t *)arg;
   3.181  
   3.182 -    vbd_free(&blkif->vbd);
   3.183 -
   3.184 -    if (blkif->evtchn)
   3.185 -        unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
   3.186 +	op.u.close.port = blkif->evtchn;
   3.187 +	op.u.close.dom = DOMID_SELF;
   3.188 +	HYPERVISOR_event_channel_op(&op);
   3.189 +	op.u.close.port = blkif->remote_evtchn;
   3.190 +	op.u.close.dom = blkif->domid;
   3.191 +	HYPERVISOR_event_channel_op(&op);
   3.192  
   3.193 -    if (blkif->blk_ring.sring) {
   3.194 -	unmap_frontend_page(blkif);
   3.195 -        free_vm_area(blkif->blk_ring_area);
   3.196 -	blkif->blk_ring.sring = NULL;
   3.197 -    }
   3.198 +	vbd_free(&blkif->vbd);
   3.199  
   3.200 -    kmem_cache_free(blkif_cachep, blkif);
   3.201 +	if (blkif->evtchn)
   3.202 +		unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
   3.203 +
   3.204 +	if (blkif->blk_ring.sring) {
   3.205 +		unmap_frontend_page(blkif);
   3.206 +		free_vm_area(blkif->blk_ring_area);
   3.207 +		blkif->blk_ring.sring = NULL;
   3.208 +	}
   3.209 +
   3.210 +	kmem_cache_free(blkif_cachep, blkif);
   3.211  }
   3.212  
   3.213  void free_blkif_callback(blkif_t *blkif)
   3.214  {
   3.215 -    INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
   3.216 -    schedule_work(&blkif->free_work);
   3.217 +	INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
   3.218 +	schedule_work(&blkif->free_work);
   3.219  }
   3.220  
   3.221  void __init blkif_interface_init(void)
   3.222  {
   3.223 -    blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
   3.224 -                                     0, 0, NULL, NULL);
   3.225 +	blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
   3.226 +					 0, 0, NULL, NULL);
   3.227  }
   3.228 +
   3.229 +/*
   3.230 + * Local variables:
   3.231 + *  c-file-style: "linux"
   3.232 + *  indent-tabs-mode: t
   3.233 + *  c-indent-level: 8
   3.234 + *  c-basic-offset: 8
   3.235 + *  tab-width: 8
   3.236 + * End:
   3.237 + */
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c	Thu Sep 22 14:01:01 2005 +0100
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c	Thu Sep 22 14:04:14 2005 +0100
     4.3 @@ -11,10 +11,10 @@
     4.4  
     4.5  static inline dev_t vbd_map_devnum(u32 cookie)
     4.6  {
     4.7 -    return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
     4.8 +	return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
     4.9  }
    4.10 -#define vbd_sz(_v)   ((_v)->bdev->bd_part ? \
    4.11 -    (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
    4.12 +#define vbd_sz(_v)   ((_v)->bdev->bd_part ?				\
    4.13 +	(_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
    4.14  #define bdev_put(_b) blkdev_put(_b)
    4.15  
    4.16  unsigned long vbd_size(struct vbd *vbd)
    4.17 @@ -35,63 +35,73 @@ unsigned long vbd_secsize(struct vbd *vb
    4.18  int vbd_create(blkif_t *blkif, blkif_vdev_t handle,
    4.19  	       u32 pdevice, int readonly)
    4.20  {
    4.21 -    struct vbd *vbd;
    4.22 -
    4.23 -    vbd = &blkif->vbd;
    4.24 -    vbd->handle   = handle; 
    4.25 -    vbd->readonly = readonly;
    4.26 -    vbd->type     = 0;
    4.27 -
    4.28 -    vbd->pdevice  = pdevice;
    4.29 +	struct vbd *vbd;
    4.30  
    4.31 -    vbd->bdev = open_by_devnum(
    4.32 -        vbd_map_devnum(vbd->pdevice),
    4.33 -        vbd->readonly ? FMODE_READ : FMODE_WRITE);
    4.34 -    if ( IS_ERR(vbd->bdev) )
    4.35 -    {
    4.36 -        DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
    4.37 -        return -ENOENT;
    4.38 -    }
    4.39 +	vbd = &blkif->vbd;
    4.40 +	vbd->handle   = handle; 
    4.41 +	vbd->readonly = readonly;
    4.42 +	vbd->type     = 0;
    4.43  
    4.44 -    if ( (vbd->bdev->bd_disk == NULL) )
    4.45 -    {
    4.46 -        DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
    4.47 -	vbd_free(vbd);
    4.48 -        return -ENOENT;
    4.49 -    }
    4.50 +	vbd->pdevice  = pdevice;
    4.51  
    4.52 -    if ( vbd->bdev->bd_disk->flags & GENHD_FL_CD )
    4.53 -        vbd->type |= VDISK_CDROM;
    4.54 -    if ( vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE )
    4.55 -        vbd->type |= VDISK_REMOVABLE;
    4.56 +	vbd->bdev = open_by_devnum(
    4.57 +		vbd_map_devnum(vbd->pdevice),
    4.58 +		vbd->readonly ? FMODE_READ : FMODE_WRITE);
    4.59 +	if (IS_ERR(vbd->bdev)) {
    4.60 +		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
    4.61 +			vbd->pdevice);
    4.62 +		return -ENOENT;
    4.63 +	}
    4.64  
    4.65 -    DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
    4.66 -            handle, blkif->domid);
    4.67 -    return 0;
    4.68 +	if (vbd->bdev->bd_disk == NULL) {
    4.69 +		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
    4.70 +			vbd->pdevice);
    4.71 +		vbd_free(vbd);
    4.72 +		return -ENOENT;
    4.73 +	}
    4.74 +
    4.75 +	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
    4.76 +		vbd->type |= VDISK_CDROM;
    4.77 +	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
    4.78 +		vbd->type |= VDISK_REMOVABLE;
    4.79 +
    4.80 +	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
    4.81 +		handle, blkif->domid);
    4.82 +	return 0;
    4.83  }
    4.84  
    4.85  void vbd_free(struct vbd *vbd)
    4.86  {
    4.87 -    if (vbd->bdev)
    4.88 -	bdev_put(vbd->bdev);
    4.89 -    vbd->bdev = NULL;
    4.90 +	if (vbd->bdev)
    4.91 +		bdev_put(vbd->bdev);
    4.92 +	vbd->bdev = NULL;
    4.93  }
    4.94  
    4.95  int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
    4.96  {
    4.97 -    struct vbd *vbd = &blkif->vbd;
    4.98 -    int rc = -EACCES;
    4.99 -
   4.100 -    if ((operation == WRITE) && vbd->readonly)
   4.101 -        goto out;
   4.102 +	struct vbd *vbd = &blkif->vbd;
   4.103 +	int rc = -EACCES;
   4.104  
   4.105 -    if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
   4.106 -        goto out;
   4.107 +	if ((operation == WRITE) && vbd->readonly)
   4.108 +		goto out;
   4.109  
   4.110 -    req->dev  = vbd->pdevice;
   4.111 -    req->bdev = vbd->bdev;
   4.112 -    rc = 0;
   4.113 +	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
   4.114 +		goto out;
   4.115 +
   4.116 +	req->dev  = vbd->pdevice;
   4.117 +	req->bdev = vbd->bdev;
   4.118 +	rc = 0;
   4.119  
   4.120   out:
   4.121 -    return rc;
   4.122 +	return rc;
   4.123  }
   4.124 +
   4.125 +/*
   4.126 + * Local variables:
   4.127 + *  c-file-style: "linux"
   4.128 + *  indent-tabs-mode: t
   4.129 + *  c-indent-level: 8
   4.130 + *  c-basic-offset: 8
   4.131 + *  tab-width: 8
   4.132 + * End:
   4.133 + */
     5.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Thu Sep 22 14:01:01 2005 +0100
     5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Thu Sep 22 14:04:14 2005 +0100
     5.3 @@ -124,7 +124,7 @@ static void frontend_changed(struct xenb
     5.4  
     5.5  	return;
     5.6  
     5.7 -abort:
     5.8 + abort:
     5.9  	xenbus_transaction_end(1);
    5.10  }
    5.11  
    5.12 @@ -278,3 +278,13 @@ void blkif_xenbus_init(void)
    5.13  {
    5.14  	xenbus_register_backend(&blkback);
    5.15  }
    5.16 +
    5.17 +/*
    5.18 + * Local variables:
    5.19 + *  c-file-style: "linux"
    5.20 + *  indent-tabs-mode: t
    5.21 + *  c-indent-level: 8
    5.22 + *  c-basic-offset: 8
    5.23 + *  tab-width: 8
    5.24 + * End:
    5.25 + */
     6.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Thu Sep 22 14:01:01 2005 +0100
     6.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Thu Sep 22 14:04:14 2005 +0100
     6.3 @@ -146,4 +146,15 @@ extern void do_blkif_request (request_qu
     6.4  int xlvbd_add(blkif_sector_t capacity, int device,
     6.5  	      u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
     6.6  void xlvbd_del(struct blkfront_info *info);
     6.7 +
     6.8  #endif /* __XEN_DRIVERS_BLOCK_H__ */
     6.9 +
    6.10 +/*
    6.11 + * Local variables:
    6.12 + *  c-file-style: "linux"
    6.13 + *  indent-tabs-mode: t
    6.14 + *  c-indent-level: 8
    6.15 + *  c-basic-offset: 8
    6.16 + *  tab-width: 8
    6.17 + * End:
    6.18 + */
     7.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c	Thu Sep 22 14:01:01 2005 +0100
     7.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c	Thu Sep 22 14:04:14 2005 +0100
     7.3 @@ -65,7 +65,7 @@ static struct xlbd_type_info xlbd_vbd_ty
     7.4  };
     7.5  
     7.6  static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
     7.7 -					  NUM_VBD_MAJORS];
     7.8 +					 NUM_VBD_MAJORS];
     7.9  
    7.10  #define XLBD_MAJOR_IDE_START	0
    7.11  #define XLBD_MAJOR_SCSI_START	(NUM_IDE_MAJORS)
    7.12 @@ -309,3 +309,13 @@ xlvbd_del(struct blkfront_info *info)
    7.13  
    7.14  	bdput(bd);
    7.15  }
    7.16 +
    7.17 +/*
    7.18 + * Local variables:
    7.19 + *  c-file-style: "linux"
    7.20 + *  indent-tabs-mode: t
    7.21 + *  c-indent-level: 8
    7.22 + *  c-basic-offset: 8
    7.23 + *  tab-width: 8
    7.24 + * End:
    7.25 + */
     8.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Thu Sep 22 14:01:01 2005 +0100
     8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Thu Sep 22 14:04:14 2005 +0100
     8.3 @@ -4,7 +4,6 @@
     8.4   * This is a modified version of the block backend driver that remaps requests
     8.5   * to a user-space memory region.  It is intended to be used to write 
     8.6   * application-level servers that provide block interfaces to client VMs.
     8.7 - * 
     8.8   */
     8.9  
    8.10  #include <linux/kernel.h>
    8.11 @@ -67,20 +66,19 @@ static int blktap_read_ufe_ring(void);
    8.12  
    8.13  static inline int BLKTAP_MODE_VALID(unsigned long arg)
    8.14  {
    8.15 -    return (
    8.16 -        ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
    8.17 -        ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
    8.18 -        ( arg == BLKTAP_MODE_INTERPOSE    ) );
    8.19 +	return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
    8.20 +		(arg == BLKTAP_MODE_INTERCEPT_FE) ||
    8.21 +		(arg == BLKTAP_MODE_INTERPOSE   ));
    8.22  /*
    8.23 -    return (
    8.24 -        ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
    8.25 -        ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
    8.26 -        ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
    8.27 -        ( arg == BLKTAP_MODE_INTERPOSE    ) ||
    8.28 -        ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
    8.29 -        ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
    8.30 -        ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
    8.31 -        );
    8.32 +  return (
    8.33 +  ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
    8.34 +  ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
    8.35 +  ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
    8.36 +  ( arg == BLKTAP_MODE_INTERPOSE    ) ||
    8.37 +  ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
    8.38 +  ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
    8.39 +  ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
    8.40 +  );
    8.41  */
    8.42  }
    8.43  
    8.44 @@ -110,14 +108,12 @@ unsigned long mmap_vstart;  /* Kernel pa
    8.45  unsigned long rings_vstart; /* start of mmaped vma               */
    8.46  unsigned long user_vstart;  /* start of user mappings            */
    8.47  
    8.48 -#define MMAP_PAGES                                              \
    8.49 -    (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
    8.50 -#define MMAP_VADDR(_start, _req,_seg)                           \
    8.51 -    (_start +                                                   \
    8.52 -     ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +    \
    8.53 -     ((_seg) * PAGE_SIZE))
    8.54 -
    8.55 -
    8.56 +#define MMAP_PAGES						\
    8.57 +	(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
    8.58 +#define MMAP_VADDR(_start, _req,_seg)					\
    8.59 +	(_start +							\
    8.60 +	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
    8.61 +	 ((_seg) * PAGE_SIZE))
    8.62  
    8.63  /*
    8.64   * Each outstanding request that we've passed to the lower device layers has a 
    8.65 @@ -126,12 +122,12 @@ unsigned long user_vstart;  /* start of 
    8.66   * response queued for it, with the saved 'id' passed back.
    8.67   */
    8.68  typedef struct {
    8.69 -    blkif_t       *blkif;
    8.70 -    unsigned long  id;
    8.71 -    int            nr_pages;
    8.72 -    atomic_t       pendcnt;
    8.73 -    unsigned short operation;
    8.74 -    int            status;
    8.75 +	blkif_t       *blkif;
    8.76 +	unsigned long  id;
    8.77 +	int            nr_pages;
    8.78 +	atomic_t       pendcnt;
    8.79 +	unsigned short operation;
    8.80 +	int            status;
    8.81  } pending_req_t;
    8.82  
    8.83  /*
    8.84 @@ -156,17 +152,17 @@ static PEND_RING_IDX pending_prod, pendi
    8.85  
    8.86  static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
    8.87  {
    8.88 -    return ( (fe_dom << 16) | MASK_PEND_IDX(idx) );
    8.89 +	return ((fe_dom << 16) | MASK_PEND_IDX(idx));
    8.90  }
    8.91  
    8.92  extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id) 
    8.93  { 
    8.94 -    return (PEND_RING_IDX)( id & 0x0000ffff );
    8.95 +	return (PEND_RING_IDX)(id & 0x0000ffff);
    8.96  }
    8.97  
    8.98  extern inline domid_t ID_TO_DOM(unsigned long id) 
    8.99  { 
   8.100 -    return (domid_t)(id >> 16); 
   8.101 +	return (domid_t)(id >> 16); 
   8.102  }
   8.103  
   8.104  
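MAKE_ID() and its two inverses above pack a 16-bit frontend domain ID and
a pending-ring index into the single unsigned long 'id' field that travels
through the ring. A quick standalone round-trip check (MASK_PEND_IDX is
assumed to reduce modulo a power-of-two MAX_PENDING_REQS):

	#include <assert.h>

	typedef unsigned short domid_t;
	typedef unsigned int   PEND_RING_IDX;

	#define MAX_PENDING_REQS 64			/* assumed */
	#define MASK_PEND_IDX(_i) ((_i) & (MAX_PENDING_REQS - 1))

	static unsigned long MAKE_ID(domid_t dom, PEND_RING_IDX idx)
	{
		return ((unsigned long)dom << 16) | MASK_PEND_IDX(idx);
	}

	int main(void)
	{
		unsigned long id = MAKE_ID(7, 130);	/* 130 masks to 2 */
		assert((PEND_RING_IDX)(id & 0x0000ffff) == 2);
		assert((domid_t)(id >> 16) == 7);
		return 0;
	}
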
   8.105 @@ -181,8 +177,8 @@ extern inline domid_t ID_TO_DOM(unsigned
   8.106   */
   8.107  struct grant_handle_pair
   8.108  {
   8.109 -    u16  kernel;
   8.110 -    u16  user;
   8.111 +	u16  kernel;
   8.112 +	u16  user;
   8.113  };
   8.114  static struct grant_handle_pair pending_grant_handles[MMAP_PAGES];
   8.115  #define pending_handle(_idx, _i) \
   8.116 @@ -199,21 +195,20 @@ static struct grant_handle_pair pending_
   8.117   */
   8.118  
   8.119  static struct page *blktap_nopage(struct vm_area_struct *vma,
   8.120 -                                             unsigned long address,
   8.121 -                                             int *type)
   8.122 +				  unsigned long address,
   8.123 +				  int *type)
   8.124  {
   8.125 -    /*
   8.126 -     * if the page has not been mapped in by the driver then generate
   8.127 -     * a SIGBUS to the domain.
   8.128 -     */
   8.129 +	/*
   8.130 +	 * if the page has not been mapped in by the driver then generate
   8.131 +	 * a SIGBUS to the domain.
   8.132 +	 */
   8.133 +	force_sig(SIGBUS, current);
   8.134  
   8.135 -    force_sig(SIGBUS, current);
   8.136 -
   8.137 -    return 0;
   8.138 +	return 0;
   8.139  }
   8.140  
   8.141  struct vm_operations_struct blktap_vm_ops = {
   8.142 -    nopage:   blktap_nopage,
   8.143 +	nopage:   blktap_nopage,
   8.144  };
   8.145  
   8.146  /******************************************************************
   8.147 @@ -222,44 +217,45 @@ struct vm_operations_struct blktap_vm_op
   8.148  
   8.149  static int blktap_open(struct inode *inode, struct file *filp)
   8.150  {
   8.151 -    blkif_sring_t *sring;
   8.152 -    
   8.153 -    if ( test_and_set_bit(0, &blktap_dev_inuse) )
   8.154 -        return -EBUSY;
   8.155 +	blkif_sring_t *sring;
   8.156 +
   8.157 +	if (test_and_set_bit(0, &blktap_dev_inuse))
   8.158 +		return -EBUSY;
   8.159      
   8.160 -    /* Allocate the fe ring. */
   8.161 -    sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
   8.162 -    if (sring == NULL)
   8.163 -        goto fail_nomem;
   8.164 +	/* Allocate the fe ring. */
   8.165 +	sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
   8.166 +	if (sring == NULL)
   8.167 +		goto fail_nomem;
   8.168  
   8.169 -    SetPageReserved(virt_to_page(sring));
   8.170 +	SetPageReserved(virt_to_page(sring));
   8.171      
   8.172 -    SHARED_RING_INIT(sring);
   8.173 -    FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
   8.174 +	SHARED_RING_INIT(sring);
   8.175 +	FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
   8.176  
   8.177 -    return 0;
   8.178 +	return 0;
   8.179  
   8.180   fail_nomem:
   8.181 -    return -ENOMEM;
   8.182 +	return -ENOMEM;
   8.183  }
   8.184  
   8.185  static int blktap_release(struct inode *inode, struct file *filp)
   8.186  {
   8.187 -    blktap_dev_inuse = 0;
   8.188 -    blktap_ring_ok = 0;
   8.189 -
   8.190 -    /* Free the ring page. */
   8.191 -    ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
   8.192 -    free_page((unsigned long) blktap_ufe_ring.sring);
   8.193 +	blktap_dev_inuse = 0;
   8.194 +	blktap_ring_ok = 0;
   8.195  
   8.196 -    /* Clear any active mappings and free foreign map table */
   8.197 -    if (blktap_vma != NULL) {
   8.198 -        zap_page_range(blktap_vma, blktap_vma->vm_start, 
   8.199 -                       blktap_vma->vm_end - blktap_vma->vm_start, NULL);
   8.200 -        blktap_vma = NULL;
   8.201 -    }
   8.202 +	/* Free the ring page. */
   8.203 +	ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
   8.204 +	free_page((unsigned long) blktap_ufe_ring.sring);
   8.205  
   8.206 -    return 0;
   8.207 +	/* Clear any active mappings and free foreign map table */
   8.208 +	if (blktap_vma != NULL) {
   8.209 +		zap_page_range(
   8.210 +			blktap_vma, blktap_vma->vm_start, 
   8.211 +			blktap_vma->vm_end - blktap_vma->vm_start, NULL);
   8.212 +		blktap_vma = NULL;
   8.213 +	}
   8.214 +
   8.215 +	return 0;
   8.216  }
   8.217  
   8.218  
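blktap_open() above is the standard Xen shared-ring bring-up: a
test_and_set_bit() gate admits exactly one opener, one zeroed page is
allocated and marked reserved so it can later be remapped to user space,
and both the shared indices and the private front-ring view are
initialised over it. Condensed to its skeleton (a sketch of the calls
above, not a drop-in function):

	blkif_sring_t *sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
	if (sring == NULL)
		return -ENOMEM;
	SetPageReserved(virt_to_page(sring));	/* page will be mmap'd */
	SHARED_RING_INIT(sring);		/* zero shared indices */
	FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);

blktap_release() must undo the same steps in reverse, which is exactly
the ClearPageReserved()/free_page() pairing in the hunk above.
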
   8.219 @@ -283,128 +279,124 @@ static int blktap_release(struct inode *
   8.220   */
   8.221  static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
   8.222  {
   8.223 -    int size;
   8.224 -    struct page **map;
   8.225 -    int i;
   8.226 -
   8.227 -    DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
   8.228 -           vma->vm_start, vma->vm_end);
   8.229 -
   8.230 -    vma->vm_flags |= VM_RESERVED;
   8.231 -    vma->vm_ops = &blktap_vm_ops;
   8.232 -
   8.233 -    size = vma->vm_end - vma->vm_start;
   8.234 -    if ( size != ( (MMAP_PAGES + RING_PAGES) << PAGE_SHIFT ) ) {
   8.235 -        printk(KERN_INFO 
   8.236 -               "blktap: you _must_ map exactly %d pages!\n",
   8.237 -               MMAP_PAGES + RING_PAGES);
   8.238 -        return -EAGAIN;
   8.239 -    }
   8.240 -
   8.241 -    size >>= PAGE_SHIFT;
   8.242 -    DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
   8.243 -    
   8.244 -    rings_vstart = vma->vm_start;
   8.245 -    user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
   8.246 -    
   8.247 -    /* Map the ring pages to the start of the region and reserve it. */
   8.248 -
   8.249 -    /* not sure if I really need to do this... */
   8.250 -    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
   8.251 +	int size;
   8.252 +	struct page **map;
   8.253 +	int i;
   8.254  
   8.255 -    if (remap_pfn_range(vma, vma->vm_start, 
   8.256 -                         __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
   8.257 -                         PAGE_SIZE, vma->vm_page_prot)) 
   8.258 -    {
   8.259 -        WPRINTK("Mapping user ring failed!\n");
   8.260 -        goto fail;
   8.261 -    }
   8.262 +	DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
   8.263 +		vma->vm_start, vma->vm_end);
   8.264  
   8.265 -    /* Mark this VM as containing foreign pages, and set up mappings. */
   8.266 -    map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
   8.267 -                  * sizeof(struct page_struct*),
   8.268 -                  GFP_KERNEL);
   8.269 -    if (map == NULL) 
   8.270 -    {
   8.271 -        WPRINTK("Couldn't alloc VM_FOREIGH map.\n");
   8.272 -        goto fail;
   8.273 -    }
   8.274 +	vma->vm_flags |= VM_RESERVED;
   8.275 +	vma->vm_ops = &blktap_vm_ops;
   8.276  
   8.277 -    for (i=0; i<((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
   8.278 -        map[i] = NULL;
   8.279 +	size = vma->vm_end - vma->vm_start;
   8.280 +	if (size != ((MMAP_PAGES + RING_PAGES) << PAGE_SHIFT)) {
   8.281 +		printk(KERN_INFO 
   8.282 +		       "blktap: you _must_ map exactly %d pages!\n",
   8.283 +		       MMAP_PAGES + RING_PAGES);
   8.284 +		return -EAGAIN;
   8.285 +	}
   8.286 +
   8.287 +	size >>= PAGE_SHIFT;
   8.288 +	DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
   8.289      
   8.290 -    vma->vm_private_data = map;
   8.291 -    vma->vm_flags |= VM_FOREIGN;
   8.292 -
   8.293 -    blktap_vma = vma;
   8.294 -    blktap_ring_ok = 1;
   8.295 +	rings_vstart = vma->vm_start;
   8.296 +	user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
   8.297 +    
   8.298 +	/* Map the ring pages to the start of the region and reserve it. */
   8.299  
   8.300 -    return 0;
   8.301 +	/* not sure if I really need to do this... */
   8.302 +	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
   8.303 +
   8.304 +	if (remap_pfn_range(vma, vma->vm_start, 
   8.305 +			    __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
   8.306 +			    PAGE_SIZE, vma->vm_page_prot)) {
   8.307 +		WPRINTK("Mapping user ring failed!\n");
   8.308 +		goto fail;
   8.309 +	}
   8.310 +
   8.311 +	/* Mark this VM as containing foreign pages, and set up mappings. */
   8.312 +	map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
    8.313 +		      * sizeof(struct page *),
   8.314 +		      GFP_KERNEL);
   8.315 +	if (map == NULL) {
    8.316 +		WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
   8.317 +		goto fail;
   8.318 +	}
   8.319 +
   8.320 +	for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
   8.321 +		map[i] = NULL;
   8.322 +    
   8.323 +	vma->vm_private_data = map;
   8.324 +	vma->vm_flags |= VM_FOREIGN;
   8.325 +
   8.326 +	blktap_vma = vma;
   8.327 +	blktap_ring_ok = 1;
   8.328 +
   8.329 +	return 0;
   8.330   fail:
   8.331 -    /* Clear any active mappings. */
   8.332 -    zap_page_range(vma, vma->vm_start, 
   8.333 -                   vma->vm_end - vma->vm_start, NULL);
   8.334 +	/* Clear any active mappings. */
   8.335 +	zap_page_range(vma, vma->vm_start, 
   8.336 +		       vma->vm_end - vma->vm_start, NULL);
   8.337  
   8.338 -    return -ENOMEM;
   8.339 +	return -ENOMEM;
   8.340  }
   8.341  
   8.342  static int blktap_ioctl(struct inode *inode, struct file *filp,
   8.343                          unsigned int cmd, unsigned long arg)
   8.344  {
   8.345 -    switch(cmd) {
   8.346 -    case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
   8.347 -        return blktap_read_ufe_ring();
    8.348 +	switch (cmd) {
   8.349 +	case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
   8.350 +		return blktap_read_ufe_ring();
   8.351  
   8.352 -    case BLKTAP_IOCTL_SETMODE:
   8.353 -        if (BLKTAP_MODE_VALID(arg)) {
   8.354 -            blktap_mode = arg;
   8.355 -            /* XXX: may need to flush rings here. */
   8.356 -            printk(KERN_INFO "blktap: set mode to %lx\n", arg);
   8.357 -            return 0;
   8.358 -        }
   8.359 -    case BLKTAP_IOCTL_PRINT_IDXS:
   8.360 +	case BLKTAP_IOCTL_SETMODE:
   8.361 +		if (BLKTAP_MODE_VALID(arg)) {
   8.362 +			blktap_mode = arg;
   8.363 +			/* XXX: may need to flush rings here. */
   8.364 +			printk(KERN_INFO "blktap: set mode to %lx\n", arg);
   8.365 +			return 0;
   8.366 +		}
   8.367 +	case BLKTAP_IOCTL_PRINT_IDXS:
   8.368          {
   8.369 -            //print_fe_ring_idxs();
   8.370 -            WPRINTK("User Rings: \n-----------\n");
   8.371 -            WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d "
   8.372 -                            "| req_prod: %2d, rsp_prod: %2d\n",
   8.373 -                            blktap_ufe_ring.rsp_cons,
   8.374 -                            blktap_ufe_ring.req_prod_pvt,
   8.375 -                            blktap_ufe_ring.sring->req_prod,
   8.376 -                            blktap_ufe_ring.sring->rsp_prod);
   8.377 +		//print_fe_ring_idxs();
    8.378 +		WPRINTK("User Rings:\n-----------\n");
    8.379 +		WPRINTK("UF: rsp_cons: %2d, req_prod_pvt: %2d "
   8.380 +			"| req_prod: %2d, rsp_prod: %2d\n",
   8.381 +			blktap_ufe_ring.rsp_cons,
   8.382 +			blktap_ufe_ring.req_prod_pvt,
   8.383 +			blktap_ufe_ring.sring->req_prod,
   8.384 +			blktap_ufe_ring.sring->rsp_prod);
   8.385              
   8.386          }
   8.387 -    }
   8.388 -    return -ENOIOCTLCMD;
   8.389 +	}
   8.390 +	return -ENOIOCTLCMD;
   8.391  }
   8.392  
   8.393  static unsigned int blktap_poll(struct file *file, poll_table *wait)
   8.394  {
   8.395 -        poll_wait(file, &blktap_wait, wait);
   8.396 -        if ( RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring) ) 
   8.397 -        {
   8.398 -            flush_tlb_all();
   8.399 +	poll_wait(file, &blktap_wait, wait);
   8.400 +	if (RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring)) {
   8.401 +		flush_tlb_all();
   8.402 +		RING_PUSH_REQUESTS(&blktap_ufe_ring);
   8.403 +		return POLLIN | POLLRDNORM;
   8.404 +	}
   8.405  
   8.406 -            RING_PUSH_REQUESTS(&blktap_ufe_ring);
   8.407 -            return POLLIN | POLLRDNORM;
   8.408 -        }
   8.409 -
   8.410 -        return 0;
   8.411 +	return 0;
   8.412  }
   8.413  
   8.414  void blktap_kick_user(void)
   8.415  {
   8.416 -    /* blktap_ring->req_prod = blktap_req_prod; */
   8.417 -    wake_up_interruptible(&blktap_wait);
   8.418 +	/* blktap_ring->req_prod = blktap_req_prod; */
   8.419 +	wake_up_interruptible(&blktap_wait);
   8.420  }
   8.421  
   8.422  static struct file_operations blktap_fops = {
   8.423 -    owner:    THIS_MODULE,
   8.424 -    poll:     blktap_poll,
   8.425 -    ioctl:    blktap_ioctl,
   8.426 -    open:     blktap_open,
   8.427 -    release:  blktap_release,
   8.428 -    mmap:     blktap_mmap,
   8.429 +	owner:    THIS_MODULE,
   8.430 +	poll:     blktap_poll,
   8.431 +	ioctl:    blktap_ioctl,
   8.432 +	open:     blktap_open,
   8.433 +	release:  blktap_release,
   8.434 +	mmap:     blktap_mmap,
   8.435  };
   8.436  
   8.437  
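The file_operations table above fixes the user-space contract: a tap
process poll()s the character device, blktap_poll() pushes any unpushed
ring requests and reports POLLIN, and after servicing them the process
kicks the driver with the BLKTAP_IOCTL_KICK_FE ioctl so responses are
consumed. A minimal user-side loop might look like this (a sketch: the
ioctl value is an assumption; only the /dev/misc/blktap name is taken
from the driver):

	#include <fcntl.h>
	#include <poll.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define BLKTAP_IOCTL_KICK_FE 1		/* assumed value */

	int main(void)
	{
		struct pollfd pfd;

		pfd.fd = open("/dev/misc/blktap", O_RDWR);
		if (pfd.fd < 0)
			return 1;
		pfd.events = POLLIN | POLLRDNORM;

		for (;;) {
			if (poll(&pfd, 1, -1) < 0)
				break;
			if (pfd.revents & POLLIN) {
				/* ... consume requests from the mapped
				 * rings, post responses ... */
				ioctl(pfd.fd, BLKTAP_IOCTL_KICK_FE);
			}
		}
		close(pfd.fd);
		return 0;
	}
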
   8.438 @@ -417,44 +409,44 @@ static void make_response(blkif_t *blkif
   8.439  
   8.440  static void fast_flush_area(int idx, int nr_pages)
   8.441  {
   8.442 -    struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
   8.443 -    unsigned int i, op = 0;
   8.444 -    struct grant_handle_pair *handle;
   8.445 -    unsigned long ptep;
   8.446 -
   8.447 -    for (i=0; i<nr_pages; i++)
   8.448 -    {
   8.449 -        handle = &pending_handle(idx, i);
   8.450 -        if (!BLKTAP_INVALID_HANDLE(handle))
   8.451 -        {
   8.452 -
   8.453 -            unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
   8.454 -            unmap[op].dev_bus_addr = 0;
   8.455 -            unmap[op].handle = handle->kernel;
   8.456 -            op++;
   8.457 +	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
   8.458 +	unsigned int i, op = 0;
   8.459 +	struct grant_handle_pair *handle;
   8.460 +	unsigned long ptep;
   8.461  
   8.462 -            if (create_lookup_pte_addr(blktap_vma->vm_mm,
   8.463 -                                       MMAP_VADDR(user_vstart, idx, i), 
   8.464 -                                       &ptep) !=0) {
   8.465 -                DPRINTK("Couldn't get a pte addr!\n");
   8.466 -                return;
   8.467 -            }
   8.468 -            unmap[op].host_addr    = ptep;
   8.469 -            unmap[op].dev_bus_addr = 0;
   8.470 -            unmap[op].handle       = handle->user;
   8.471 -            op++;
    8.472 +	for (i = 0; i < nr_pages; i++)
   8.473 +	{
   8.474 +		handle = &pending_handle(idx, i);
   8.475 +		if (BLKTAP_INVALID_HANDLE(handle))
   8.476 +			continue;
   8.477 +
   8.478 +		unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
   8.479 +		unmap[op].dev_bus_addr = 0;
   8.480 +		unmap[op].handle = handle->kernel;
   8.481 +		op++;
   8.482 +
   8.483 +		if (create_lookup_pte_addr(
   8.484 +			blktap_vma->vm_mm,
   8.485 +			MMAP_VADDR(user_vstart, idx, i), 
    8.486 +			&ptep) != 0) {
   8.487 +			DPRINTK("Couldn't get a pte addr!\n");
   8.488 +			return;
   8.489 +		}
   8.490 +		unmap[op].host_addr    = ptep;
   8.491 +		unmap[op].dev_bus_addr = 0;
   8.492 +		unmap[op].handle       = handle->user;
   8.493 +		op++;
   8.494              
   8.495 -           BLKTAP_INVALIDATE_HANDLE(handle);
   8.496 -        }
   8.497 -    }
   8.498 -    if ( unlikely(HYPERVISOR_grant_table_op(
   8.499 -        GNTTABOP_unmap_grant_ref, unmap, op)))
   8.500 -        BUG();
   8.501 +		BLKTAP_INVALIDATE_HANDLE(handle);
   8.502 +	}
   8.503  
   8.504 -    if (blktap_vma != NULL)
   8.505 -        zap_page_range(blktap_vma, 
   8.506 -                       MMAP_VADDR(user_vstart, idx, 0), 
   8.507 -                       nr_pages << PAGE_SHIFT, NULL);
   8.508 +	BUG_ON(HYPERVISOR_grant_table_op(
   8.509 +		GNTTABOP_unmap_grant_ref, unmap, op));
   8.510 +
   8.511 +	if (blktap_vma != NULL)
   8.512 +		zap_page_range(blktap_vma, 
   8.513 +			       MMAP_VADDR(user_vstart, idx, 0), 
   8.514 +			       nr_pages << PAGE_SHIFT, NULL);
   8.515  }
   8.516  
   8.517  /******************************************************************
   8.518 @@ -466,34 +458,38 @@ static spinlock_t blkio_schedule_list_lo
   8.519  
   8.520  static int __on_blkdev_list(blkif_t *blkif)
   8.521  {
   8.522 -    return blkif->blkdev_list.next != NULL;
   8.523 +	return blkif->blkdev_list.next != NULL;
   8.524  }
   8.525  
   8.526  static void remove_from_blkdev_list(blkif_t *blkif)
   8.527  {
   8.528 -    unsigned long flags;
   8.529 -    if ( !__on_blkdev_list(blkif) ) return;
   8.530 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.531 -    if ( __on_blkdev_list(blkif) )
   8.532 -    {
   8.533 -        list_del(&blkif->blkdev_list);
   8.534 -        blkif->blkdev_list.next = NULL;
   8.535 -        blkif_put(blkif);
   8.536 -    }
   8.537 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.538 +	unsigned long flags;
   8.539 +
   8.540 +	if (!__on_blkdev_list(blkif))
   8.541 +		return;
   8.542 +
   8.543 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.544 +	if (__on_blkdev_list(blkif)) {
   8.545 +		list_del(&blkif->blkdev_list);
   8.546 +		blkif->blkdev_list.next = NULL;
   8.547 +		blkif_put(blkif);
   8.548 +	}
   8.549 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.550  }
   8.551  
   8.552  static void add_to_blkdev_list_tail(blkif_t *blkif)
   8.553  {
   8.554 -    unsigned long flags;
   8.555 -    if ( __on_blkdev_list(blkif) ) return;
   8.556 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.557 -    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
   8.558 -    {
   8.559 -        list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
   8.560 -        blkif_get(blkif);
   8.561 -    }
   8.562 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.563 +	unsigned long flags;
   8.564 +
   8.565 +	if (__on_blkdev_list(blkif))
   8.566 +		return;
   8.567 +
   8.568 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.569 +	if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
   8.570 +		list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
   8.571 +		blkif_get(blkif);
   8.572 +	}
   8.573 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.574  }
   8.575  
   8.576  
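Both list helpers above share one shape worth naming: an unlocked
fast-path test of __on_blkdev_list(), then the same test repeated under
blkio_schedule_list_lock before mutating, since the answer may change
between the two reads. The "next == NULL means unlisted" convention is
what makes the unlocked peek harmless. Distilled, with the invariants
spelled out:

	if (!__on_blkdev_list(blkif))	/* racy peek: cheap, may be stale */
		return;
	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
	if (__on_blkdev_list(blkif)) {	/* authoritative re-check */
		list_del(&blkif->blkdev_list);
		blkif->blkdev_list.next = NULL;	/* mark unlisted */
		blkif_put(blkif);	/* drop the list's reference */
	}
	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
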
   8.577 @@ -505,51 +501,50 @@ static DECLARE_WAIT_QUEUE_HEAD(blkio_sch
   8.578  
   8.579  static int blkio_schedule(void *arg)
   8.580  {
   8.581 -    DECLARE_WAITQUEUE(wq, current);
   8.582 -
   8.583 -    blkif_t          *blkif;
   8.584 -    struct list_head *ent;
   8.585 -
   8.586 -    daemonize("xenblkd");
   8.587 +	DECLARE_WAITQUEUE(wq, current);
   8.588  
   8.589 -    for ( ; ; )
   8.590 -    {
   8.591 -        /* Wait for work to do. */
   8.592 -        add_wait_queue(&blkio_schedule_wait, &wq);
   8.593 -        set_current_state(TASK_INTERRUPTIBLE);
   8.594 -        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
   8.595 -             list_empty(&blkio_schedule_list) )
   8.596 -            schedule();
   8.597 -        __set_current_state(TASK_RUNNING);
   8.598 -        remove_wait_queue(&blkio_schedule_wait, &wq);
   8.599 +	blkif_t          *blkif;
   8.600 +	struct list_head *ent;
   8.601  
   8.602 -        /* Queue up a batch of requests. */
   8.603 -        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
   8.604 -                !list_empty(&blkio_schedule_list) )
   8.605 -        {
   8.606 -            ent = blkio_schedule_list.next;
   8.607 -            blkif = list_entry(ent, blkif_t, blkdev_list);
   8.608 -            blkif_get(blkif);
   8.609 -            remove_from_blkdev_list(blkif);
   8.610 -            if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
   8.611 -                add_to_blkdev_list_tail(blkif);
   8.612 -            blkif_put(blkif);
   8.613 -        }
   8.614 -    }
   8.615 +	daemonize("xenblkd");
   8.616 +
   8.617 +	for (;;) {
   8.618 +		/* Wait for work to do. */
   8.619 +		add_wait_queue(&blkio_schedule_wait, &wq);
   8.620 +		set_current_state(TASK_INTERRUPTIBLE);
   8.621 +		if ((NR_PENDING_REQS == MAX_PENDING_REQS) || 
   8.622 +		    list_empty(&blkio_schedule_list))
   8.623 +			schedule();
   8.624 +		__set_current_state(TASK_RUNNING);
   8.625 +		remove_wait_queue(&blkio_schedule_wait, &wq);
   8.626 +
   8.627 +		/* Queue up a batch of requests. */
   8.628 +		while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
   8.629 +		       !list_empty(&blkio_schedule_list)) {
   8.630 +			ent = blkio_schedule_list.next;
   8.631 +			blkif = list_entry(ent, blkif_t, blkdev_list);
   8.632 +			blkif_get(blkif);
   8.633 +			remove_from_blkdev_list(blkif);
   8.634 +			if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
   8.635 +				add_to_blkdev_list_tail(blkif);
   8.636 +			blkif_put(blkif);
   8.637 +		}
   8.638 +	}
   8.639  }
   8.640  
   8.641  static void maybe_trigger_blkio_schedule(void)
   8.642  {
   8.643 -    /*
   8.644 -     * Needed so that two processes, who together make the following predicate
   8.645 -     * true, don't both read stale values and evaluate the predicate
   8.646 -     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
   8.647 -     */
   8.648 -    smp_mb();
   8.649 +	/*
   8.650 +	 * Needed so that two processes, who together make the following
   8.651 +	 * predicate true, don't both read stale values and evaluate the
   8.652 +	 * predicate incorrectly. Incredibly unlikely to stall the scheduler
   8.653 +	 * on the x86, but...
   8.654 +	 */
   8.655 +	smp_mb();
   8.656  
   8.657 -    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   8.658 -         !list_empty(&blkio_schedule_list) )
   8.659 -        wake_up(&blkio_schedule_wait);
   8.660 +	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   8.661 +	    !list_empty(&blkio_schedule_list))
   8.662 +		wake_up(&blkio_schedule_wait);
   8.663  }
   8.664  
   8.665  
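The smp_mb() above is a barrier-before-predicate: each path first
publishes its half of the wake-up condition (the pending count dropping,
or a blkif joining the list) and then samples the other half, and the
full barrier keeps the sample from being reordered ahead of the publish.
Without it, two racing CPUs could each read a stale half and both skip
the wake_up(). Schematically (annotation only, not code from this tree):

	/* completion path                 scheduling path
	 *   pending count drops             blkif added to list
	 *   smp_mb();                       smp_mb();
	 *   if (reqs low && list            if (reqs low && list
	 *       non-empty)                      non-empty)
	 *           wake_up(...);                   wake_up(...);
	 *
	 * At least one side now observes both updates, so the wake-up
	 * cannot be lost. */
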
   8.666 @@ -561,54 +556,53 @@ static void maybe_trigger_blkio_schedule
   8.667  
   8.668  static int blktap_read_ufe_ring(void)
   8.669  {
   8.670 -    /* This is called to read responses from the UFE ring. */
   8.671 -
   8.672 -    RING_IDX i, j, rp;
   8.673 -    blkif_response_t *resp;
   8.674 -    blkif_t *blkif;
   8.675 -    int pending_idx;
   8.676 -    pending_req_t *pending_req;
   8.677 -    unsigned long     flags;
   8.678 +	/* This is called to read responses from the UFE ring. */
   8.679  
   8.680 -    /* if we are forwarding from UFERring to FERing */
   8.681 -    if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
   8.682 +	RING_IDX i, j, rp;
   8.683 +	blkif_response_t *resp;
   8.684 +	blkif_t *blkif;
   8.685 +	int pending_idx;
   8.686 +	pending_req_t *pending_req;
   8.687 +	unsigned long     flags;
   8.688  
   8.689 -        /* for each outstanding message on the UFEring  */
   8.690 -        rp = blktap_ufe_ring.sring->rsp_prod;
   8.691 -        rmb();
    8.692 +	/* if we are forwarding from the UFE ring to the FE ring */
   8.693 +	if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
   8.694 +
    8.695 +		/* for each outstanding message on the UFE ring */
   8.696 +		rp = blktap_ufe_ring.sring->rsp_prod;
   8.697 +		rmb();
   8.698          
   8.699 -        for ( i = blktap_ufe_ring.rsp_cons; i != rp; i++ )
   8.700 -        {
   8.701 -            resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
   8.702 -            pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
   8.703 -            pending_req = &pending_reqs[pending_idx];
   8.704 +		for (i = blktap_ufe_ring.rsp_cons; i != rp; i++) {
   8.705 +			resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
   8.706 +			pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
   8.707 +			pending_req = &pending_reqs[pending_idx];
   8.708              
   8.709 -            blkif = pending_req->blkif;
   8.710 -            for (j = 0; j < pending_req->nr_pages; j++) {
   8.711 -                unsigned long vaddr;
   8.712 -                struct page **map = blktap_vma->vm_private_data;
   8.713 -                int offset; 
   8.714 -
   8.715 -                vaddr  = MMAP_VADDR(user_vstart, pending_idx, j);
   8.716 -                offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
   8.717 +			blkif = pending_req->blkif;
   8.718 +			for (j = 0; j < pending_req->nr_pages; j++) {
   8.719 +				unsigned long vaddr;
   8.720 +				struct page **map = blktap_vma->vm_private_data;
   8.721 +				int offset; 
   8.722  
   8.723 -                //ClearPageReserved(virt_to_page(vaddr));
   8.724 -                ClearPageReserved((struct page *)map[offset]);
   8.725 -                map[offset] = NULL;
   8.726 -            }
   8.727 +				vaddr  = MMAP_VADDR(user_vstart, pending_idx, j);
   8.728 +				offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
   8.729  
   8.730 -            fast_flush_area(pending_idx, pending_req->nr_pages);
   8.731 -            make_response(blkif, pending_req->id, resp->operation, 
   8.732 -                          resp->status);
   8.733 -            blkif_put(pending_req->blkif);
   8.734 -            spin_lock_irqsave(&pend_prod_lock, flags);
   8.735 -            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   8.736 -            spin_unlock_irqrestore(&pend_prod_lock, flags);
   8.737 -        }
   8.738 -        blktap_ufe_ring.rsp_cons = i;
   8.739 -        maybe_trigger_blkio_schedule();
   8.740 -    }
   8.741 -    return 0;
   8.742 +				//ClearPageReserved(virt_to_page(vaddr));
   8.743 +				ClearPageReserved((struct page *)map[offset]);
   8.744 +				map[offset] = NULL;
   8.745 +			}
   8.746 +
   8.747 +			fast_flush_area(pending_idx, pending_req->nr_pages);
   8.748 +			make_response(blkif, pending_req->id, resp->operation, 
   8.749 +				      resp->status);
   8.750 +			blkif_put(pending_req->blkif);
   8.751 +			spin_lock_irqsave(&pend_prod_lock, flags);
   8.752 +			pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   8.753 +			spin_unlock_irqrestore(&pend_prod_lock, flags);
   8.754 +		}
   8.755 +		blktap_ufe_ring.rsp_cons = i;
   8.756 +		maybe_trigger_blkio_schedule();
   8.757 +	}
   8.758 +	return 0;
   8.759  }
   8.760  
   8.761  
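blktap_read_ufe_ring() above follows the canonical Xen ring-consumer
idiom: snapshot the producer index once, issue rmb() so response payloads
are not read ahead of that index, walk rsp_cons up to the snapshot, then
publish the new consumer index. A standalone model of just the index
protocol (a sketch; a GCC builtin stands in for rmb()):

	#include <stdio.h>

	#define RING_SIZE 8			/* power of two, assumed */
	#define MASK(_i)  ((_i) & (RING_SIZE - 1))

	struct ring {
		volatile unsigned int rsp_prod;	/* producer writes */
		unsigned int rsp_cons;		/* consumer writes */
		int resp[RING_SIZE];
	};

	static void consume(struct ring *r)
	{
		unsigned int i, rp = r->rsp_prod;	/* snapshot once */
		__sync_synchronize();			/* rmb() stand-in */
		for (i = r->rsp_cons; i != rp; i++)
			printf("response %d\n", r->resp[MASK(i)]);
		r->rsp_cons = i;			/* publish progress */
	}

	int main(void)
	{
		struct ring r = { 0, 0, { 0 } };
		r.resp[MASK(r.rsp_prod)] = 42;
		r.rsp_prod++;			/* produce one entry */
		consume(&r);
		return 0;
	}
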
   8.762 @@ -618,10 +612,10 @@ static int blktap_read_ufe_ring(void)
   8.763  
   8.764  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
   8.765  {
   8.766 -    blkif_t *blkif = dev_id;
   8.767 -    add_to_blkdev_list_tail(blkif);
   8.768 -    maybe_trigger_blkio_schedule();
   8.769 -    return IRQ_HANDLED;
   8.770 +	blkif_t *blkif = dev_id;
   8.771 +	add_to_blkdev_list_tail(blkif);
   8.772 +	maybe_trigger_blkio_schedule();
   8.773 +	return IRQ_HANDLED;
   8.774  }
   8.775  
   8.776  
   8.777 @@ -632,199 +626,194 @@ irqreturn_t blkif_be_int(int irq, void *
   8.778  
   8.779  static int do_block_io_op(blkif_t *blkif, int max_to_do)
   8.780  {
   8.781 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   8.782 -    blkif_request_t *req;
   8.783 -    RING_IDX i, rp;
   8.784 -    int more_to_do = 0;
   8.785 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   8.786 +	blkif_request_t *req;
   8.787 +	RING_IDX i, rp;
   8.788 +	int more_to_do = 0;
   8.789      
   8.790 -    rp = blk_ring->sring->req_prod;
   8.791 -    rmb(); /* Ensure we see queued requests up to 'rp'. */
   8.792 +	rp = blk_ring->sring->req_prod;
   8.793 +	rmb(); /* Ensure we see queued requests up to 'rp'. */
   8.794  
   8.795 -    for ( i = blk_ring->req_cons; 
   8.796 -         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
   8.797 -          i++ )
   8.798 -    {
   8.799 -        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
   8.800 -        {
   8.801 -            more_to_do = 1;
   8.802 -            break;
   8.803 -        }
   8.804 +	for (i = blk_ring->req_cons; 
   8.805 +	     (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
    8.806 +	     i++) {
   8.807 +		if ((max_to_do-- == 0) ||
   8.808 +		    (NR_PENDING_REQS == MAX_PENDING_REQS)) {
   8.809 +			more_to_do = 1;
   8.810 +			break;
   8.811 +		}
   8.812          
   8.813 -        req = RING_GET_REQUEST(blk_ring, i);
   8.814 -        switch ( req->operation )
   8.815 -        {
   8.816 -        case BLKIF_OP_READ:
   8.817 -        case BLKIF_OP_WRITE:
   8.818 -            dispatch_rw_block_io(blkif, req);
   8.819 -            break;
   8.820 +		req = RING_GET_REQUEST(blk_ring, i);
   8.821 +		switch (req->operation) {
   8.822 +		case BLKIF_OP_READ:
   8.823 +		case BLKIF_OP_WRITE:
   8.824 +			dispatch_rw_block_io(blkif, req);
   8.825 +			break;
   8.826  
   8.827 -        default:
   8.828 -            DPRINTK("error: unknown block io operation [%d]\n",
   8.829 -                    req->operation);
   8.830 -            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   8.831 -            break;
   8.832 -        }
   8.833 -    }
   8.834 +		default:
   8.835 +			DPRINTK("error: unknown block io operation [%d]\n",
   8.836 +				req->operation);
   8.837 +			make_response(blkif, req->id, req->operation,
   8.838 +				      BLKIF_RSP_ERROR);
   8.839 +			break;
   8.840 +		}
   8.841 +	}
   8.842  
   8.843 -    blk_ring->req_cons = i;
   8.844 -    blktap_kick_user();
   8.845 +	blk_ring->req_cons = i;
   8.846 +	blktap_kick_user();
   8.847  
   8.848 -    return more_to_do;
   8.849 +	return more_to_do;
   8.850  }
   8.851  
   8.852  static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
   8.853  {
   8.854 -    blkif_request_t *target;
   8.855 -    int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   8.856 -    pending_req_t *pending_req;
   8.857 -    struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
   8.858 -    int op, ret;
   8.859 -    unsigned int nseg;
   8.860 +	blkif_request_t *target;
   8.861 +	int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   8.862 +	pending_req_t *pending_req;
   8.863 +	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
   8.864 +	int op, ret;
   8.865 +	unsigned int nseg;
   8.866  
   8.867 -    /* Check that number of segments is sane. */
   8.868 -    nseg = req->nr_segments;
   8.869 -    if ( unlikely(nseg == 0) || 
   8.870 -         unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
   8.871 -    {
   8.872 -        DPRINTK("Bad number of segments in request (%d)\n", nseg);
   8.873 -        goto bad_descriptor;
   8.874 -    }
   8.875 +	/* Check that number of segments is sane. */
   8.876 +	nseg = req->nr_segments;
   8.877 +	if (unlikely(nseg == 0) || 
   8.878 +	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
   8.879 +		DPRINTK("Bad number of segments in request (%d)\n", nseg);
   8.880 +		goto bad_descriptor;
   8.881 +	}
   8.882  
   8.883 -    /* Make sure userspace is ready. */
   8.884 -    if (!blktap_ring_ok) {
   8.885 -        DPRINTK("blktap: ring not ready for requests!\n");
   8.886 -        goto bad_descriptor;
   8.887 -    }
   8.888 +	/* Make sure userspace is ready. */
   8.889 +	if (!blktap_ring_ok) {
   8.890 +		DPRINTK("blktap: ring not ready for requests!\n");
   8.891 +		goto bad_descriptor;
   8.892 +	}
   8.893      
   8.894  
   8.895 -    if ( RING_FULL(&blktap_ufe_ring) ) {
   8.896 -        WPRINTK("blktap: fe_ring is full, can't add (very broken!).\n");
   8.897 -        goto bad_descriptor;
   8.898 -    }
   8.899 -
   8.900 -    flush_cache_all(); /* a noop on intel... */
   8.901 -
   8.902 -    /* Map the foreign pages directly in to the application */    
   8.903 -    op = 0;
   8.904 -    for (i=0; i<req->nr_segments; i++) {
   8.905 -
   8.906 -        unsigned long uvaddr;
   8.907 -        unsigned long kvaddr;
   8.908 -        unsigned long ptep;
   8.909 -
   8.910 -        uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
   8.911 -        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
   8.912 -
   8.913 -        /* Map the remote page to kernel. */
   8.914 -        map[op].host_addr = kvaddr;
   8.915 -        map[op].dom   = blkif->domid;
   8.916 -        map[op].ref   = blkif_gref_from_fas(req->frame_and_sects[i]);
   8.917 -        map[op].flags = GNTMAP_host_map;
   8.918 -        /* This needs a bit more thought in terms of interposition: 
   8.919 -         * If we want to be able to modify pages during write using 
   8.920 -         * grant table mappings, the guest will either need to allow 
   8.921 -         * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
   8.922 -        if (req->operation == BLKIF_OP_WRITE)
   8.923 -            map[op].flags |= GNTMAP_readonly;
   8.924 -        op++;
   8.925 -
   8.926 -        /* Now map it to user. */
   8.927 -        ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
   8.928 -        if (ret)
   8.929 -        {
   8.930 -            DPRINTK("Couldn't get a pte addr!\n");
   8.931 -            fast_flush_area(pending_idx, req->nr_segments);
   8.932 -            goto bad_descriptor;
   8.933 -        }
   8.934 -
   8.935 -        map[op].host_addr = ptep;
   8.936 -        map[op].dom       = blkif->domid;
   8.937 -        map[op].ref       = blkif_gref_from_fas(req->frame_and_sects[i]);
   8.938 -        map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
   8.939 -                            | GNTMAP_contains_pte;
   8.940 -        /* Above interposition comment applies here as well. */
   8.941 -        if (req->operation == BLKIF_OP_WRITE)
   8.942 -            map[op].flags |= GNTMAP_readonly;
   8.943 -        op++;
   8.944 -    }
   8.945 -
   8.946 -    if ( unlikely(HYPERVISOR_grant_table_op(
   8.947 -            GNTTABOP_map_grant_ref, map, op)))
   8.948 -        BUG();
   8.949 -
   8.950 -    op = 0;
   8.951 -    for (i=0; i<(req->nr_segments*2); i+=2) {
   8.952 -        unsigned long uvaddr;
   8.953 -        unsigned long kvaddr;
   8.954 -        unsigned long offset;
   8.955 -        int cancel = 0;
   8.956 +	if (RING_FULL(&blktap_ufe_ring)) {
   8.957 +		WPRINTK("blktap: fe_ring is full, can't add "
   8.958 +			"(very broken!).\n");
   8.959 +		goto bad_descriptor;
   8.960 +	}
   8.961  
   8.962 -        uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
   8.963 -        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
   8.964 -
   8.965 -        if ( unlikely(map[i].handle < 0) ) 
   8.966 -        {
   8.967 -            DPRINTK("Error on kernel grant mapping (%d)\n", map[i].handle);
   8.968 -            ret = map[i].handle;
   8.969 -            cancel = 1;
   8.970 -        }
   8.971 -
   8.972 -        if ( unlikely(map[i+1].handle < 0) ) 
   8.973 -        {
   8.974 -            DPRINTK("Error on user grant mapping (%d)\n", map[i+1].handle);
   8.975 -            ret = map[i+1].handle;
   8.976 -            cancel = 1;
   8.977 -        }
   8.978 -
   8.979 -        if (cancel) 
   8.980 -        {
   8.981 -            fast_flush_area(pending_idx, req->nr_segments);
   8.982 -            goto bad_descriptor;
   8.983 -        }
   8.984 -
   8.985 -        /* Set the necessary mappings in p2m and in the VM_FOREIGN 
   8.986 -         * vm_area_struct to allow user vaddr -> struct page lookups
   8.987 -         * to work.  This is needed for direct IO to foreign pages. */
   8.988 -        phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
   8.989 -            FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
   8.990 +	flush_cache_all(); /* a noop on intel... */
   8.991  
   8.992 -        offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
   8.993 -        ((struct page **)blktap_vma->vm_private_data)[offset] =
   8.994 -            pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
   8.995 -
   8.996 -        /* Save handles for unmapping later. */
   8.997 -        pending_handle(pending_idx, i/2).kernel = map[i].handle;
   8.998 -        pending_handle(pending_idx, i/2).user   = map[i+1].handle;
   8.999 -    }
  8.1000 -
  8.1001 -    /* Mark mapped pages as reserved: */
  8.1002 -    for ( i = 0; i < req->nr_segments; i++ )
  8.1003 -    {
  8.1004 -        unsigned long kvaddr;
   8.1005 +	/* Map the foreign pages directly into the application. */
  8.1006 +	op = 0;
  8.1007 +	for (i = 0; i < req->nr_segments; i++) {
  8.1008  
  8.1009 -        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
  8.1010 -        SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
  8.1011 -    }
  8.1012 +		unsigned long uvaddr;
  8.1013 +		unsigned long kvaddr;
  8.1014 +		unsigned long ptep;
  8.1015  
  8.1016 -    pending_req = &pending_reqs[pending_idx];
  8.1017 -    pending_req->blkif     = blkif;
  8.1018 -    pending_req->id        = req->id;
  8.1019 -    pending_req->operation = req->operation;
  8.1020 -    pending_req->status    = BLKIF_RSP_OKAY;
  8.1021 -    pending_req->nr_pages  = nseg;
  8.1022 -    req->id = MAKE_ID(blkif->domid, pending_idx);
  8.1023 -    //atomic_set(&pending_req->pendcnt, nbio);
  8.1024 -    pending_cons++;
  8.1025 -    blkif_get(blkif);
  8.1026 +		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
  8.1027 +		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
  8.1028  
  8.1029 -    /* Finally, write the request message to the user ring. */
  8.1030 -    target = RING_GET_REQUEST(&blktap_ufe_ring, blktap_ufe_ring.req_prod_pvt);
  8.1031 -    memcpy(target, req, sizeof(*req));
  8.1032 -    blktap_ufe_ring.req_prod_pvt++;
  8.1033 -    return;
  8.1034 +		/* Map the remote page to kernel. */
  8.1035 +		map[op].host_addr = kvaddr;
  8.1036 +		map[op].dom   = blkif->domid;
  8.1037 +		map[op].ref   = blkif_gref_from_fas(req->frame_and_sects[i]);
  8.1038 +		map[op].flags = GNTMAP_host_map;
  8.1039 +		/* This needs a bit more thought in terms of interposition: 
  8.1040 +		 * If we want to be able to modify pages during write using 
  8.1041 +		 * grant table mappings, the guest will either need to allow 
  8.1042 +		 * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
  8.1043 +		if (req->operation == BLKIF_OP_WRITE)
  8.1044 +			map[op].flags |= GNTMAP_readonly;
  8.1045 +		op++;
  8.1046 +
  8.1047 +		/* Now map it to user. */
  8.1048 +		ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
  8.1049 +		if (ret) {
  8.1050 +			DPRINTK("Couldn't get a pte addr!\n");
  8.1051 +			fast_flush_area(pending_idx, req->nr_segments);
  8.1052 +			goto bad_descriptor;
  8.1053 +		}
  8.1054 +
  8.1055 +		map[op].host_addr = ptep;
  8.1056 +		map[op].dom       = blkif->domid;
  8.1057 +		map[op].ref       = blkif_gref_from_fas(req->frame_and_sects[i]);
  8.1058 +		map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
  8.1059 +			| GNTMAP_contains_pte;
  8.1060 +		/* Above interposition comment applies here as well. */
  8.1061 +		if (req->operation == BLKIF_OP_WRITE)
  8.1062 +			map[op].flags |= GNTMAP_readonly;
  8.1063 +		op++;
  8.1064 +	}
  8.1065 +
  8.1066 +	BUG_ON(HYPERVISOR_grant_table_op(
  8.1067 +		GNTTABOP_map_grant_ref, map, op));
  8.1068 +
  8.1069 +	op = 0;
  8.1070 +	for (i = 0; i < (req->nr_segments*2); i += 2) {
  8.1071 +		unsigned long uvaddr;
  8.1072 +		unsigned long kvaddr;
  8.1073 +		unsigned long offset;
  8.1074 +		int cancel = 0;
  8.1075 +
  8.1076 +		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
  8.1077 +		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
  8.1078 +
  8.1079 +		if (unlikely(map[i].handle < 0)) {
  8.1080 +			DPRINTK("Error on kernel grant mapping (%d)\n",
  8.1081 +				map[i].handle);
  8.1082 +			ret = map[i].handle;
  8.1083 +			cancel = 1;
  8.1084 +		}
  8.1085 +
  8.1086 +		if (unlikely(map[i+1].handle < 0)) {
  8.1087 +			DPRINTK("Error on user grant mapping (%d)\n",
  8.1088 +				map[i+1].handle);
  8.1089 +			ret = map[i+1].handle;
  8.1090 +			cancel = 1;
  8.1091 +		}
  8.1092 +
  8.1093 +		if (cancel) {
  8.1094 +			fast_flush_area(pending_idx, req->nr_segments);
  8.1095 +			goto bad_descriptor;
  8.1096 +		}
  8.1097 +
  8.1098 +		/* Set the necessary mappings in p2m and in the VM_FOREIGN 
  8.1099 +		 * vm_area_struct to allow user vaddr -> struct page lookups
  8.1100 +		 * to work.  This is needed for direct IO to foreign pages. */
  8.1101 +		phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
  8.1102 +			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
  8.1103 +
  8.1104 +		offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
  8.1105 +		((struct page **)blktap_vma->vm_private_data)[offset] =
  8.1106 +			pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
  8.1107 +
  8.1108 +		/* Save handles for unmapping later. */
  8.1109 +		pending_handle(pending_idx, i/2).kernel = map[i].handle;
  8.1110 +		pending_handle(pending_idx, i/2).user   = map[i+1].handle;
  8.1111 +	}
  8.1112 +
  8.1113 +	/* Mark mapped pages as reserved: */
  8.1114 +	for (i = 0; i < req->nr_segments; i++) {
  8.1115 +		unsigned long kvaddr;
  8.1116 +		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
  8.1117 +		SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
  8.1118 +	}
  8.1119 +
  8.1120 +	pending_req = &pending_reqs[pending_idx];
  8.1121 +	pending_req->blkif     = blkif;
  8.1122 +	pending_req->id        = req->id;
  8.1123 +	pending_req->operation = req->operation;
  8.1124 +	pending_req->status    = BLKIF_RSP_OKAY;
  8.1125 +	pending_req->nr_pages  = nseg;
  8.1126 +	req->id = MAKE_ID(blkif->domid, pending_idx);
  8.1127 +	//atomic_set(&pending_req->pendcnt, nbio);
  8.1128 +	pending_cons++;
  8.1129 +	blkif_get(blkif);
  8.1130 +
  8.1131 +	/* Finally, write the request message to the user ring. */
  8.1132 +	target = RING_GET_REQUEST(&blktap_ufe_ring,
  8.1133 +				  blktap_ufe_ring.req_prod_pvt);
  8.1134 +	memcpy(target, req, sizeof(*req));
  8.1135 +	blktap_ufe_ring.req_prod_pvt++;
  8.1136 +	return;
  8.1137  
  8.1138   bad_descriptor:
  8.1139 -    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
  8.1140 +	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
  8.1141  } 
  8.1142  
  8.1143  
  8.1144 @@ -837,80 +826,89 @@ static void dispatch_rw_block_io(blkif_t
  8.1145  static void make_response(blkif_t *blkif, unsigned long id, 
  8.1146                            unsigned short op, int st)
  8.1147  {
  8.1148 -    blkif_response_t *resp;
  8.1149 -    unsigned long     flags;
  8.1150 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
  8.1151 +	blkif_response_t *resp;
  8.1152 +	unsigned long     flags;
  8.1153 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
  8.1154  
  8.1155 -    /* Place on the response ring for the relevant domain. */ 
  8.1156 -    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
  8.1157 -    resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
  8.1158 -    resp->id        = id;
  8.1159 -    resp->operation = op;
  8.1160 -    resp->status    = st;
  8.1161 -    wmb(); /* Ensure other side can see the response fields. */
  8.1162 -    blk_ring->rsp_prod_pvt++;
  8.1163 -    RING_PUSH_RESPONSES(blk_ring);
  8.1164 -    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
  8.1165 +	/* Place on the response ring for the relevant domain. */ 
  8.1166 +	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
  8.1167 +	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
  8.1168 +	resp->id        = id;
  8.1169 +	resp->operation = op;
  8.1170 +	resp->status    = st;
  8.1171 +	wmb(); /* Ensure other side can see the response fields. */
  8.1172 +	blk_ring->rsp_prod_pvt++;
  8.1173 +	RING_PUSH_RESPONSES(blk_ring);
  8.1174 +	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
  8.1175  
  8.1176 -    /* Kick the relevant domain. */
  8.1177 -    notify_via_evtchn(blkif->evtchn);
  8.1178 +	/* Kick the relevant domain. */
  8.1179 +	notify_via_evtchn(blkif->evtchn);
  8.1180  }
  8.1181  
  8.1182  static struct miscdevice blktap_miscdev = {
  8.1183 -    .minor        = BLKTAP_MINOR,
  8.1184 -    .name         = "blktap",
  8.1185 -    .fops         = &blktap_fops,
  8.1186 -    .devfs_name   = "misc/blktap",
  8.1187 +	.minor        = BLKTAP_MINOR,
  8.1188 +	.name         = "blktap",
  8.1189 +	.fops         = &blktap_fops,
  8.1190 +	.devfs_name   = "misc/blktap",
  8.1191  };
  8.1192  
  8.1193  void blkif_deschedule(blkif_t *blkif)
  8.1194  {
  8.1195 -    remove_from_blkdev_list(blkif);
  8.1196 +	remove_from_blkdev_list(blkif);
  8.1197  }
  8.1198  
  8.1199  static int __init blkif_init(void)
  8.1200  {
  8.1201 -    int i, j, err;
  8.1202 -    struct page *page;
  8.1203 +	int i, j, err;
  8.1204 +	struct page *page;
  8.1205  /*
  8.1206 -    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
  8.1207 -         !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
  8.1208 -        return 0;
  8.1209 +  if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
  8.1210 +  !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
  8.1211 +  return 0;
  8.1212  */
  8.1213 -    blkif_interface_init();
  8.1214 +	blkif_interface_init();
  8.1215  
  8.1216 -    page = balloon_alloc_empty_page_range(MMAP_PAGES);
  8.1217 -    BUG_ON(page == NULL);
  8.1218 -    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
  8.1219 +	page = balloon_alloc_empty_page_range(MMAP_PAGES);
  8.1220 +	BUG_ON(page == NULL);
  8.1221 +	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
  8.1222  
  8.1223 -    pending_cons = 0;
  8.1224 -    pending_prod = MAX_PENDING_REQS;
  8.1225 -    memset(pending_reqs, 0, sizeof(pending_reqs));
  8.1226 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
  8.1227 -        pending_ring[i] = i;
  8.1228 +	pending_cons = 0;
  8.1229 +	pending_prod = MAX_PENDING_REQS;
  8.1230 +	memset(pending_reqs, 0, sizeof(pending_reqs));
   8.1231 +	for (i = 0; i < MAX_PENDING_REQS; i++)
  8.1232 +		pending_ring[i] = i;
  8.1233      
  8.1234 -    spin_lock_init(&blkio_schedule_list_lock);
  8.1235 -    INIT_LIST_HEAD(&blkio_schedule_list);
  8.1236 -
  8.1237 -    if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
  8.1238 -        BUG();
  8.1239 -
  8.1240 -    blkif_xenbus_init();
  8.1241 +	spin_lock_init(&blkio_schedule_list_lock);
  8.1242 +	INIT_LIST_HEAD(&blkio_schedule_list);
  8.1243  
  8.1244 -    for (i=0; i<MAX_PENDING_REQS ; i++)
  8.1245 -        for (j=0; j<BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
  8.1246 -            BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
  8.1247 +	BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
  8.1248  
  8.1249 -    err = misc_register(&blktap_miscdev);
  8.1250 -    if ( err != 0 )
  8.1251 -    {
  8.1252 -        printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n", err);
  8.1253 -        return err;
  8.1254 -    }
  8.1255 +	blkif_xenbus_init();
  8.1256  
  8.1257 -    init_waitqueue_head(&blktap_wait);
   8.1258 +	for (i = 0; i < MAX_PENDING_REQS; i++)
  8.1259 +		for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
  8.1260 +			BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
  8.1261  
  8.1262 -    return 0;
  8.1263 +	err = misc_register(&blktap_miscdev);
  8.1264 +	if (err != 0) {
  8.1265 +		printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n",
  8.1266 +		       err);
  8.1267 +		return err;
  8.1268 +	}
  8.1269 +
  8.1270 +	init_waitqueue_head(&blktap_wait);
  8.1271 +
  8.1272 +	return 0;
  8.1273  }
  8.1274  
  8.1275  __initcall(blkif_init);
  8.1276 +
  8.1277 +/*
  8.1278 + * Local variables:
  8.1279 + *  c-file-style: "linux"
  8.1280 + *  indent-tabs-mode: t
  8.1281 + *  c-indent-level: 8
  8.1282 + *  c-basic-offset: 8
  8.1283 + *  tab-width: 8
  8.1284 + * End:
  8.1285 + */
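
A note on the map/unmap symmetry in the blktap.c hunks above:
dispatch_rw_block_io() emits two grant-map entries per segment, one
GNTMAP_host_map entry for the kernel alias and one GNTMAP_contains_pte
entry whose host_addr is the address of the user PTE, and
fast_flush_area() later retires the same pair through handle->kernel and
handle->user. One iteration, condensed (a sketch; error handling elided):

	/* kernel alias of the granted frame */
	map[op].host_addr = MMAP_VADDR(mmap_vstart, pending_idx, i);
	map[op].flags     = GNTMAP_host_map;
	op++;

	/* user alias: aim the grant operation at the user PTE itself */
	create_lookup_pte_addr(blktap_vma->vm_mm,
			       MMAP_VADDR(user_vstart, pending_idx, i), &ptep);
	map[op].host_addr = ptep;
	map[op].flags     = GNTMAP_host_map | GNTMAP_application_map |
			    GNTMAP_contains_pte;
	op++;

Write requests additionally set GNTMAP_readonly on both entries: when the
backend writes guest data to disk it only ever reads the granted pages.
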
     9.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/common.h	Thu Sep 22 14:01:01 2005 +0100
     9.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/common.h	Thu Sep 22 14:04:14 2005 +0100
     9.3 @@ -33,39 +33,39 @@
     9.4  #define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
     9.5  
     9.6  struct vbd {
     9.7 -    blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
     9.8 -    unsigned char  readonly;    /* Non-zero -> read-only */
     9.9 -    unsigned char  type;        /* VDISK_xxx */
    9.10 -    u32            pdevice;     /* phys device that this vbd maps to */
    9.11 -    struct block_device *bdev;
    9.12 +	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
    9.13 +	unsigned char  readonly;    /* Non-zero -> read-only */
    9.14 +	unsigned char  type;        /* VDISK_xxx */
    9.15 +	u32            pdevice;     /* phys device that this vbd maps to */
    9.16 +	struct block_device *bdev;
    9.17  }; 
    9.18  
    9.19  typedef struct blkif_st {
    9.20 -    /* Unique identifier for this interface. */
    9.21 -    domid_t           domid;
    9.22 -    unsigned int      handle;
    9.23 -    /* Physical parameters of the comms window. */
    9.24 -    unsigned int      evtchn;
    9.25 -    unsigned int      remote_evtchn;
    9.26 -    /* Comms information. */
    9.27 -    blkif_back_ring_t blk_ring;
    9.28 -    struct vm_struct *blk_ring_area;
    9.29 -    /* VBDs attached to this interface. */
    9.30 -    struct vbd        vbd;
    9.31 -    /* Private fields. */
    9.32 -    enum { DISCONNECTED, CONNECTED } status;
    9.33 +	/* Unique identifier for this interface. */
    9.34 +	domid_t           domid;
    9.35 +	unsigned int      handle;
    9.36 +	/* Physical parameters of the comms window. */
    9.37 +	unsigned int      evtchn;
    9.38 +	unsigned int      remote_evtchn;
    9.39 +	/* Comms information. */
    9.40 +	blkif_back_ring_t blk_ring;
    9.41 +	struct vm_struct *blk_ring_area;
    9.42 +	/* VBDs attached to this interface. */
    9.43 +	struct vbd        vbd;
    9.44 +	/* Private fields. */
    9.45 +	enum { DISCONNECTED, CONNECTED } status;
    9.46  #ifdef CONFIG_XEN_BLKDEV_TAP_BE
    9.47 -    /* Is this a blktap frontend */
    9.48 -    unsigned int     is_blktap;
    9.49 +	/* Is this a blktap frontend */
    9.50 +	unsigned int     is_blktap;
    9.51  #endif
    9.52 -    struct list_head blkdev_list;
    9.53 -    spinlock_t       blk_ring_lock;
    9.54 -    atomic_t         refcnt;
    9.55 +	struct list_head blkdev_list;
    9.56 +	spinlock_t       blk_ring_lock;
    9.57 +	atomic_t         refcnt;
    9.58  
    9.59 -    struct work_struct free_work;
    9.60 +	struct work_struct free_work;
    9.61  
    9.62 -    u16              shmem_handle;
    9.63 -    grant_ref_t      shmem_ref;
    9.64 +	u16              shmem_handle;
    9.65 +	grant_ref_t      shmem_ref;
    9.66  } blkif_t;
    9.67  
    9.68  blkif_t *alloc_blkif(domid_t domid);
    9.69 @@ -89,10 +89,10 @@ unsigned int vbd_info(struct vbd *vbd);
    9.70  unsigned long vbd_secsize(struct vbd *vbd);
    9.71  
    9.72  struct phys_req {
    9.73 -    unsigned short       dev;
    9.74 -    unsigned short       nr_sects;
    9.75 -    struct block_device *bdev;
    9.76 -    blkif_sector_t       sector_number;
    9.77 +	unsigned short       dev;
    9.78 +	unsigned short       nr_sects;
    9.79 +	struct block_device *bdev;
    9.80 +	blkif_sector_t       sector_number;
    9.81  };
    9.82  
    9.83  int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
    9.84 @@ -106,3 +106,13 @@ void blkif_xenbus_init(void);
    9.85  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
    9.86  
    9.87  #endif /* __BLKIF__BACKEND__COMMON_H__ */
    9.88 +
    9.89 +/*
    9.90 + * Local variables:
    9.91 + *  c-file-style: "linux"
    9.92 + *  indent-tabs-mode: t
    9.93 + *  c-indent-level: 8
    9.94 + *  c-basic-offset: 8
    9.95 + *  tab-width: 8
    9.96 + * End:
    9.97 + */
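
The refcnt/free_work pair in blkif_t above backs the blkif_get() and
blkif_put() calls scattered through blktap.c: every holder takes a
reference, and the final put defers destruction to process context rather
than freeing wherever the count happens to hit zero. A plausible shape
for the helpers (a sketch; the real definitions live outside this hunk):

	#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
	#define blkif_put(_b)						\
		do {							\
			if (atomic_dec_and_test(&(_b)->refcnt))		\
				schedule_work(&(_b)->free_work);	\
		} while (0)
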
    10.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c	Thu Sep 22 14:01:01 2005 +0100
    10.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c	Thu Sep 22 14:04:14 2005 +0100
    10.3 @@ -222,3 +222,13 @@ void blkif_xenbus_init(void)
    10.4  {
    10.5  	xenbus_register_backend(&blkback);
    10.6  }
    10.7 +
    10.8 +/*
    10.9 + * Local variables:
   10.10 + *  c-file-style: "linux"
   10.11 + *  indent-tabs-mode: t
   10.12 + *  c-indent-level: 8
   10.13 + *  c-basic-offset: 8
   10.14 + *  tab-width: 8
   10.15 + * End:
   10.16 + */
    11.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/console.c	Thu Sep 22 14:01:01 2005 +0100
    11.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c	Thu Sep 22 14:04:14 2005 +0100
    11.3 @@ -75,31 +75,33 @@ extern int sysrq_enabled;
    11.4  
    11.5  static int __init xencons_setup(char *str)
    11.6  {
    11.7 -    char *q;
    11.8 -    int n;
    11.9 -
   11.10 -    if ( !strncmp(str, "ttyS", 4) )
   11.11 -        xc_mode = XC_SERIAL;
   11.12 -    else if ( !strncmp(str, "tty", 3) )
   11.13 -        xc_mode = XC_TTY;
   11.14 -    else if ( !strncmp(str, "off", 3) )
   11.15 -        xc_mode = XC_OFF;
   11.16 +	char *q;
   11.17 +	int n;
   11.18  
   11.19 -    switch ( xc_mode )
   11.20 -    {
   11.21 -    case XC_SERIAL:
   11.22 -        n = simple_strtol( str+4, &q, 10 );
   11.23 -        if ( q > (str + 4) ) xc_num = n;
   11.24 -        break;
   11.25 -    case XC_TTY:
   11.26 -        n = simple_strtol( str+3, &q, 10 );
   11.27 -        if ( q > (str + 3) ) xc_num = n;
   11.28 -        break;
   11.29 -    default:
   11.30 -        break;
   11.31 -    }
   11.32 +	if (!strncmp(str, "ttyS", 4))
   11.33 +		xc_mode = XC_SERIAL;
   11.34 +	else if (!strncmp(str, "tty", 3))
   11.35 +		xc_mode = XC_TTY;
   11.36 +	else if (!strncmp(str, "off", 3))
   11.37 +		xc_mode = XC_OFF;
   11.38  
   11.39 -    return 1;
    11.40 +	switch (xc_mode)
   11.41 +	{
   11.42 +	case XC_SERIAL:
   11.43 +		n = simple_strtol(str+4, &q, 10);
   11.44 +		if (q > (str + 4))
   11.45 +			xc_num = n;
   11.46 +		break;
   11.47 +	case XC_TTY:
   11.48 +		n = simple_strtol(str+3, &q, 10);
   11.49 +		if (q > (str + 3))
   11.50 +			xc_num = n;
   11.51 +		break;
   11.52 +	default:
   11.53 +		break;
   11.54 +	}
   11.55 +
   11.56 +	return 1;
   11.57  }
   11.58  __setup("xencons=", xencons_setup);
   11.59  
   11.60 @@ -111,11 +113,11 @@ static unsigned int wc, wp; /* write_con
   11.61  
   11.62  static int __init xencons_bufsz_setup(char *str)
   11.63  {
   11.64 -    unsigned int goal;
   11.65 -    goal = simple_strtoul(str, NULL, 0);
   11.66 -    while ( wbuf_size < goal )
   11.67 -        wbuf_size <<= 1;
   11.68 -    return 1;
   11.69 +	unsigned int goal;
   11.70 +	goal = simple_strtoul(str, NULL, 0);
   11.71 +	while (wbuf_size < goal)
   11.72 +		wbuf_size <<= 1;
   11.73 +	return 1;
   11.74  }
   11.75  __setup("xencons_bufsz=", xencons_bufsz_setup);
   11.76  
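xencons_bufsz_setup() above grows wbuf_size only by doubling, so the
write buffer stays a power of two and the WBUF_MASK() wraparound used by
the console writer can be a single AND rather than a modulo. A standalone
check of the rounding (the 4096-byte starting size is an assumption; the
real default is defined outside this hunk):

	#include <assert.h>

	static unsigned int round_up_pow2(unsigned int size, unsigned int goal)
	{
		while (size < goal)
			size <<= 1;		/* same doubling as above */
		return size;
	}

	int main(void)
	{
		/* e.g. booting with xencons_bufsz=5000 */
		unsigned int sz = round_up_pow2(4096, 5000);
		assert(sz == 8192);
		assert((sz & (sz - 1)) == 0);	/* power of two: mask works */
		return 0;
	}
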
   11.77 @@ -135,57 +137,55 @@ static struct tty_driver xencons_driver;
   11.78  /******************** Kernel console driver ********************************/
   11.79  
   11.80  static void kcons_write(
   11.81 -    struct console *c, const char *s, unsigned int count)
   11.82 +	struct console *c, const char *s, unsigned int count)
   11.83  {
   11.84 -    int           i;
   11.85 -    unsigned long flags;
   11.86 +	int           i;
   11.87 +	unsigned long flags;
   11.88  
   11.89 -    spin_lock_irqsave(&xencons_lock, flags);
   11.90 +	spin_lock_irqsave(&xencons_lock, flags);
   11.91      
   11.92 -    for ( i = 0; i < count; i++ )
   11.93 -    {
   11.94 -        if ( (wp - wc) >= (wbuf_size - 1) )
   11.95 -            break;
   11.96 -        if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' )
   11.97 -            wbuf[WBUF_MASK(wp++)] = '\r';
   11.98 -    }
   11.99 +	for (i = 0; i < count; i++) {
  11.100 +		if ((wp - wc) >= (wbuf_size - 1))
  11.101 +			break;
  11.102 +		if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
  11.103 +			wbuf[WBUF_MASK(wp++)] = '\r';
  11.104 +	}
  11.105  
  11.106 -    __xencons_tx_flush();
  11.107 +	__xencons_tx_flush();
  11.108  
  11.109 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.110 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.111  }
  11.112  
  11.113  static void kcons_write_dom0(
  11.114 -    struct console *c, const char *s, unsigned int count)
  11.115 +	struct console *c, const char *s, unsigned int count)
  11.116  {
  11.117 -    int rc;
  11.118 +	int rc;
  11.119  
  11.120 -    while ( (count > 0) &&
  11.121 -            ((rc = HYPERVISOR_console_io(
  11.122 -                CONSOLEIO_write, count, (char *)s)) > 0) )
  11.123 -    {
  11.124 -        count -= rc;
  11.125 -        s += rc;
  11.126 -    }
  11.127 +	while ((count > 0) &&
  11.128 +	       ((rc = HYPERVISOR_console_io(
  11.129 +			CONSOLEIO_write, count, (char *)s)) > 0)) {
  11.130 +		count -= rc;
  11.131 +		s += rc;
  11.132 +	}
  11.133  }
  11.134  
  11.135  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.136  static struct tty_driver *kcons_device(struct console *c, int *index)
  11.137  {
  11.138 -    *index = c->index;
  11.139 -    return xencons_driver;
  11.140 +	*index = c->index;
  11.141 +	return xencons_driver;
  11.142  }
  11.143  #else
  11.144  static kdev_t kcons_device(struct console *c)
  11.145  {
  11.146 -    return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
  11.147 +	return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
  11.148  }
  11.149  #endif
  11.150  
  11.151  static struct console kcons_info = {
  11.152 -    .device	= kcons_device,
  11.153 -    .flags	= CON_PRINTBUFFER,
  11.154 -    .index	= -1,
  11.155 +	.device	= kcons_device,
  11.156 +	.flags	= CON_PRINTBUFFER,
  11.157 +	.index	= -1,
  11.158  };
  11.159  
  11.160  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
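
The kcons_write() hunk above is easier to follow with the ring's
invariant in mind: wp (producer) and wc (consumer) are free-running
unsigned counters, WBUF_MASK() reduces them to an index only at access
time, and (wp - wc) is therefore the queued byte count even across
wrap-around. A minimal userspace sketch of that discipline, with the
same LF-to-CRLF expansion (the main() harness is illustrative only):

	#include <stdio.h>

	#define WBUF_SIZE 16			/* must be a power of two */
	#define WBUF_MASK(_i) ((_i) & (WBUF_SIZE - 1))

	static char wbuf[WBUF_SIZE];
	static unsigned int wc, wp;		/* free-running cursors */

	static void ring_write(const char *s, unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++) {
			/* Keep one slot spare so '\n' can expand to CR. */
			if ((wp - wc) >= (WBUF_SIZE - 1))
				break;
			if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
				wbuf[WBUF_MASK(wp++)] = '\r';
		}
	}

	int main(void)
	{
		ring_write("hi\n", 3);
		while (wc != wp)		/* drain: prints "hi\n\r" */
			putchar(wbuf[WBUF_MASK(wc++)]);
		return 0;
	}
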
  11.161 @@ -196,44 +196,42 @@ static int __init xen_console_init(void)
  11.162  void xen_console_init(void)
  11.163  #endif
  11.164  {
  11.165 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
  11.166 -    {
  11.167 -        if ( xc_mode == XC_DEFAULT )
  11.168 -            xc_mode = XC_SERIAL;
  11.169 -        kcons_info.write = kcons_write_dom0;
  11.170 +	if (xen_start_info->flags & SIF_INITDOMAIN) {
  11.171 +		if (xc_mode == XC_DEFAULT)
  11.172 +			xc_mode = XC_SERIAL;
  11.173 +		kcons_info.write = kcons_write_dom0;
  11.174  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.175 -        if ( xc_mode == XC_SERIAL )
  11.176 -            kcons_info.flags |= CON_ENABLED;
  11.177 +		if (xc_mode == XC_SERIAL)
  11.178 +			kcons_info.flags |= CON_ENABLED;
  11.179  #endif
  11.180 -    }
  11.181 -    else
  11.182 -    {
  11.183 -        if ( xc_mode == XC_DEFAULT )
  11.184 -            xc_mode = XC_TTY;
  11.185 -        kcons_info.write = kcons_write;
  11.186 -    }
  11.187 +	} else {
  11.188 +		if (xc_mode == XC_DEFAULT)
  11.189 +			xc_mode = XC_TTY;
  11.190 +		kcons_info.write = kcons_write;
  11.191 +	}
  11.192  
  11.193 -    switch ( xc_mode )
  11.194 -    {
  11.195 -    case XC_SERIAL:
  11.196 -        strcpy(kcons_info.name, "ttyS");
  11.197 -        if ( xc_num == -1 ) xc_num = 0;
  11.198 -        break;
  11.199 +	switch (xc_mode) {
  11.200 +	case XC_SERIAL:
  11.201 +		strcpy(kcons_info.name, "ttyS");
  11.202 +		if (xc_num == -1)
  11.203 +			xc_num = 0;
  11.204 +		break;
  11.205  
  11.206 -    case XC_TTY:
  11.207 -        strcpy(kcons_info.name, "tty");
  11.208 -        if ( xc_num == -1 ) xc_num = 1;
  11.209 -        break;
  11.210 +	case XC_TTY:
  11.211 +		strcpy(kcons_info.name, "tty");
  11.212 +		if (xc_num == -1)
  11.213 +			xc_num = 1;
  11.214 +		break;
  11.215  
  11.216 -    default:
  11.217 -        return __RETCODE;
  11.218 -    }
  11.219 +	default:
  11.220 +		return __RETCODE;
  11.221 +	}
  11.222  
  11.223 -    wbuf = alloc_bootmem(wbuf_size);
  11.224 +	wbuf = alloc_bootmem(wbuf_size);
  11.225  
  11.226 -    register_console(&kcons_info);
  11.227 +	register_console(&kcons_info);
  11.228  
  11.229 -    return __RETCODE;
  11.230 +	return __RETCODE;
  11.231  }
  11.232  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.233  console_initcall(xen_console_init);
  11.234 @@ -246,41 +244,40 @@ asmlinkage int xprintk(const char *fmt, 
  11.235  asmlinkage int xprintk(const char *fmt, ...)
  11.236  #endif
  11.237  {
  11.238 -    va_list args;
  11.239 -    int printk_len;
  11.240 -    static char printk_buf[1024];
  11.241 +	va_list args;
  11.242 +	int printk_len;
  11.243 +	static char printk_buf[1024];
  11.244      
  11.245 -    /* Emit the output into the temporary buffer */
  11.246 -    va_start(args, fmt);
  11.247 -    printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
  11.248 -    va_end(args);
  11.249 +	/* Emit the output into the temporary buffer */
  11.250 +	va_start(args, fmt);
  11.251 +	printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
  11.252 +	va_end(args);
  11.253  
  11.254 -    /* Send the processed output directly to Xen. */
  11.255 -    kcons_write_dom0(NULL, printk_buf, printk_len);
  11.256 +	/* Send the processed output directly to Xen. */
  11.257 +	kcons_write_dom0(NULL, printk_buf, printk_len);
  11.258  
  11.259 -    return 0;
  11.260 +	return 0;
  11.261  }
  11.262  
  11.263  /*** Forcibly flush console data before dying. ***/
  11.264  void xencons_force_flush(void)
  11.265  {
  11.266 -    int        sz;
  11.267 +	int sz;
  11.268  
  11.269 -    /* Emergency console is synchronous, so there's nothing to flush. */
  11.270 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
  11.271 -        return;
  11.272 +	/* Emergency console is synchronous, so there's nothing to flush. */
  11.273 +	if (xen_start_info->flags & SIF_INITDOMAIN)
  11.274 +		return;
  11.275  
  11.276  
  11.277 -    /* Spin until console data is flushed through to the domain controller. */
  11.278 -    while ( (wc != wp) )
  11.279 -    {
  11.280 -	int sent = 0;
  11.281 -        if ( (sz = wp - wc) == 0 )
  11.282 -            continue;
  11.283 -	sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  11.284 -	if (sent > 0)
  11.285 -	    wc += sent;
  11.286 -    }
  11.287 +	/* Spin until console data is flushed through to the daemon. */
  11.288 +	while (wc != wp) {
  11.289 +		int sent = 0;
  11.290 +		if ((sz = wp - wc) == 0)
  11.291 +			continue;
  11.292 +		sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  11.293 +		if (sent > 0)
  11.294 +			wc += sent;
  11.295 +	}
  11.296  }
  11.297  
  11.298  
  11.299 @@ -305,362 +302,358 @@ static char x_char;
  11.300  /* Non-privileged receive callback. */
  11.301  static void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
  11.302  {
  11.303 -    int           i;
  11.304 -    unsigned long flags;
  11.305 +	int           i;
  11.306 +	unsigned long flags;
  11.307  
  11.308 -    spin_lock_irqsave(&xencons_lock, flags);
  11.309 -    if ( xencons_tty != NULL )
  11.310 -    {
  11.311 -        for ( i = 0; i < len; i++ ) {
  11.312 +	spin_lock_irqsave(&xencons_lock, flags);
  11.313 +	if (xencons_tty == NULL)
  11.314 +		goto out;
  11.315 +
  11.316 +	for (i = 0; i < len; i++) {
  11.317  #ifdef CONFIG_MAGIC_SYSRQ
  11.318 -            if (sysrq_enabled) {
  11.319 -                if (buf[i] == '\x0f') { /* ^O */
  11.320 -                    sysrq_requested = jiffies;
  11.321 -                    continue; /* don't print the sysrq key */
  11.322 -                } else if (sysrq_requested) {
  11.323 -                    unsigned long sysrq_timeout = sysrq_requested + HZ*2;
  11.324 -                    sysrq_requested = 0;
  11.325 -                    /* if it's been less than a timeout, do the sysrq */
  11.326 -                    if (time_before(jiffies, sysrq_timeout)) {
  11.327 -                        spin_unlock_irqrestore(&xencons_lock, flags);
  11.328 -                        handle_sysrq(buf[i], regs, xencons_tty);
  11.329 -                        spin_lock_irqsave(&xencons_lock, flags);
  11.330 -                        continue;
  11.331 -                    }
  11.332 -                }
  11.333 -            }
  11.334 +		if (sysrq_enabled) {
  11.335 +			if (buf[i] == '\x0f') { /* ^O */
  11.336 +				sysrq_requested = jiffies;
  11.337 +				continue; /* don't print the sysrq key */
  11.338 +			} else if (sysrq_requested) {
  11.339 +				unsigned long sysrq_timeout =
  11.340 +					sysrq_requested + HZ*2;
  11.341 +				sysrq_requested = 0;
  11.342 +				if (time_before(jiffies, sysrq_timeout)) {
  11.343 +					spin_unlock_irqrestore(
  11.344 +						&xencons_lock, flags);
  11.345 +					handle_sysrq(
  11.346 +						buf[i], regs, xencons_tty);
  11.347 +					spin_lock_irqsave(
  11.348 +						&xencons_lock, flags);
  11.349 +					continue;
  11.350 +				}
  11.351 +			}
  11.352 +		}
  11.353  #endif
  11.354 -            tty_insert_flip_char(xencons_tty, buf[i], 0);
  11.355 -        }
  11.356 -        tty_flip_buffer_push(xencons_tty);
  11.357 -    }
  11.358 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.359 +		tty_insert_flip_char(xencons_tty, buf[i], 0);
  11.360 +	}
  11.361 +	tty_flip_buffer_push(xencons_tty);
  11.362  
  11.363 + out:
  11.364 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.365  }
  11.366  
  11.367  /* Privileged and non-privileged transmit worker. */
  11.368  static void __xencons_tx_flush(void)
  11.369  {
  11.370 -    int        sz, work_done = 0;
  11.371 -
  11.372 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
  11.373 -    {
  11.374 -        if ( x_char )
  11.375 -        {
  11.376 -            kcons_write_dom0(NULL, &x_char, 1);
  11.377 -            x_char = 0;
  11.378 -            work_done = 1;
  11.379 -        }
  11.380 +	int sz, work_done = 0;
  11.381  
  11.382 -        while ( wc != wp )
  11.383 -        {
  11.384 -            sz = wp - wc;
  11.385 -            if ( sz > (wbuf_size - WBUF_MASK(wc)) )
  11.386 -                sz = wbuf_size - WBUF_MASK(wc);
  11.387 -            kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
  11.388 -            wc += sz;
  11.389 -            work_done = 1;
  11.390 -        }
  11.391 -    }
  11.392 -    else
  11.393 -    {
  11.394 -        while ( x_char )
  11.395 -        {
  11.396 -	    if (xencons_ring_send(&x_char, 1) == 1) {
  11.397 -		x_char = 0;
  11.398 -		work_done = 1;
  11.399 -	    }
  11.400 -        }
  11.401 +	if (xen_start_info->flags & SIF_INITDOMAIN) {
  11.402 +		if (x_char) {
  11.403 +			kcons_write_dom0(NULL, &x_char, 1);
  11.404 +			x_char = 0;
  11.405 +			work_done = 1;
  11.406 +		}
  11.407  
  11.408 -        while ( wc != wp )
  11.409 -        {
  11.410 -	    int sent;
  11.411 -            sz = wp - wc;
  11.412 -	    if ( sz > (wbuf_size - WBUF_MASK(wc)) )
  11.413 -		sz = wbuf_size - WBUF_MASK(wc);
  11.414 -	    sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  11.415 -	    if ( sent > 0 ) {
  11.416 -		wc += sent;
  11.417 -		work_done = 1;
  11.418 -	    }
  11.419 -        }
  11.420 -    }
  11.421 +		while (wc != wp) {
  11.422 +			sz = wp - wc;
  11.423 +			if (sz > (wbuf_size - WBUF_MASK(wc)))
  11.424 +				sz = wbuf_size - WBUF_MASK(wc);
  11.425 +			kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
  11.426 +			wc += sz;
  11.427 +			work_done = 1;
  11.428 +		}
  11.429 +	} else {
  11.430 +		while (x_char) {
  11.431 +			if (xencons_ring_send(&x_char, 1) == 1) {
  11.432 +				x_char = 0;
  11.433 +				work_done = 1;
  11.434 +			}
  11.435 +		}
  11.436  
  11.437 -    if ( work_done && (xencons_tty != NULL) )
  11.438 -    {
  11.439 -        wake_up_interruptible(&xencons_tty->write_wait);
  11.440 -        if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
  11.441 -             (xencons_tty->ldisc.write_wakeup != NULL) )
  11.442 -            (xencons_tty->ldisc.write_wakeup)(xencons_tty);
  11.443 -    }
  11.444 +		while (wc != wp) {
  11.445 +			int sent;
  11.446 +			sz = wp - wc;
  11.447 +			if (sz > (wbuf_size - WBUF_MASK(wc)))
  11.448 +				sz = wbuf_size - WBUF_MASK(wc);
  11.449 +			sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  11.450 +			if (sent > 0) {
  11.451 +				wc += sent;
  11.452 +				work_done = 1;
  11.453 +			}
  11.454 +		}
  11.455 +	}
  11.456 +
  11.457 +	if (work_done && (xencons_tty != NULL))
  11.458 +	{
  11.459 +		wake_up_interruptible(&xencons_tty->write_wait);
  11.460 +		if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
  11.461 +		    (xencons_tty->ldisc.write_wakeup != NULL))
  11.462 +			(xencons_tty->ldisc.write_wakeup)(xencons_tty);
  11.463 +	}
  11.464  }
  11.465  
  11.466  /* Privileged receive callback and transmit kicker. */
  11.467  static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
  11.468                                            struct pt_regs *regs)
  11.469  {
  11.470 -    static char   rbuf[16];
  11.471 -    int           i, l;
  11.472 -    unsigned long flags;
  11.473 -
  11.474 -    spin_lock_irqsave(&xencons_lock, flags);
  11.475 +	static char   rbuf[16];
  11.476 +	int           i, l;
  11.477 +	unsigned long flags;
  11.478  
  11.479 -    if ( xencons_tty != NULL )
  11.480 -    {
  11.481 -        /* Receive work. */
  11.482 -        while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
  11.483 -            for ( i = 0; i < l; i++ )
  11.484 -                tty_insert_flip_char(xencons_tty, rbuf[i], 0);
  11.485 -        if ( xencons_tty->flip.count != 0 )
  11.486 -            tty_flip_buffer_push(xencons_tty);
  11.487 -    }
  11.488 +	spin_lock_irqsave(&xencons_lock, flags);
  11.489  
  11.490 -    /* Transmit work. */
  11.491 -    __xencons_tx_flush();
  11.492 +	if (xencons_tty != NULL)
  11.493 +	{
  11.494 +		/* Receive work. */
  11.495 +		while ((l = HYPERVISOR_console_io(
  11.496 +			CONSOLEIO_read, 16, rbuf)) > 0)
  11.497 +			for (i = 0; i < l; i++)
  11.498 +				tty_insert_flip_char(xencons_tty, rbuf[i], 0);
  11.499 +		if (xencons_tty->flip.count != 0)
  11.500 +			tty_flip_buffer_push(xencons_tty);
  11.501 +	}
  11.502  
  11.503 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.504 +	/* Transmit work. */
  11.505 +	__xencons_tx_flush();
  11.506  
  11.507 -    return IRQ_HANDLED;
  11.508 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.509 +
  11.510 +	return IRQ_HANDLED;
  11.511  }
  11.512  
  11.513  static int xencons_write_room(struct tty_struct *tty)
  11.514  {
  11.515 -    return wbuf_size - (wp - wc);
  11.516 +	return wbuf_size - (wp - wc);
  11.517  }
  11.518  
  11.519  static int xencons_chars_in_buffer(struct tty_struct *tty)
  11.520  {
  11.521 -    return wp - wc;
  11.522 +	return wp - wc;
  11.523  }
  11.524  
  11.525  static void xencons_send_xchar(struct tty_struct *tty, char ch)
  11.526  {
  11.527 -    unsigned long flags;
  11.528 +	unsigned long flags;
  11.529  
  11.530 -    if ( TTY_INDEX(tty) != 0 )
  11.531 -        return;
  11.532 +	if (TTY_INDEX(tty) != 0)
  11.533 +		return;
  11.534  
  11.535 -    spin_lock_irqsave(&xencons_lock, flags);
  11.536 -    x_char = ch;
  11.537 -    __xencons_tx_flush();
  11.538 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.539 +	spin_lock_irqsave(&xencons_lock, flags);
  11.540 +	x_char = ch;
  11.541 +	__xencons_tx_flush();
  11.542 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.543  }
  11.544  
  11.545  static void xencons_throttle(struct tty_struct *tty)
  11.546  {
  11.547 -    if ( TTY_INDEX(tty) != 0 )
  11.548 -        return;
  11.549 +	if (TTY_INDEX(tty) != 0)
  11.550 +		return;
  11.551  
  11.552 -    if ( I_IXOFF(tty) )
  11.553 -        xencons_send_xchar(tty, STOP_CHAR(tty));
  11.554 +	if (I_IXOFF(tty))
  11.555 +		xencons_send_xchar(tty, STOP_CHAR(tty));
  11.556  }
  11.557  
  11.558  static void xencons_unthrottle(struct tty_struct *tty)
  11.559  {
  11.560 -    if ( TTY_INDEX(tty) != 0 )
  11.561 -        return;
  11.562 +	if (TTY_INDEX(tty) != 0)
  11.563 +		return;
  11.564  
  11.565 -    if ( I_IXOFF(tty) )
  11.566 -    {
  11.567 -        if ( x_char != 0 )
  11.568 -            x_char = 0;
  11.569 -        else
  11.570 -            xencons_send_xchar(tty, START_CHAR(tty));
  11.571 -    }
  11.572 +	if (I_IXOFF(tty)) {
  11.573 +		if (x_char != 0)
  11.574 +			x_char = 0;
  11.575 +		else
  11.576 +			xencons_send_xchar(tty, START_CHAR(tty));
  11.577 +	}
  11.578  }
  11.579  
  11.580  static void xencons_flush_buffer(struct tty_struct *tty)
  11.581  {
  11.582 -    unsigned long flags;
  11.583 +	unsigned long flags;
  11.584  
  11.585 -    if ( TTY_INDEX(tty) != 0 )
  11.586 -        return;
  11.587 +	if (TTY_INDEX(tty) != 0)
  11.588 +		return;
  11.589  
  11.590 -    spin_lock_irqsave(&xencons_lock, flags);
  11.591 -    wc = wp = 0;
  11.592 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.593 +	spin_lock_irqsave(&xencons_lock, flags);
  11.594 +	wc = wp = 0;
  11.595 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.596  }
  11.597  
  11.598  static inline int __xencons_put_char(int ch)
  11.599  {
  11.600 -    char _ch = (char)ch;
  11.601 -    if ( (wp - wc) == wbuf_size )
  11.602 -        return 0;
  11.603 -    wbuf[WBUF_MASK(wp++)] = _ch;
  11.604 -    return 1;
  11.605 +	char _ch = (char)ch;
  11.606 +	if ((wp - wc) == wbuf_size)
  11.607 +		return 0;
  11.608 +	wbuf[WBUF_MASK(wp++)] = _ch;
  11.609 +	return 1;
  11.610  }
  11.611  
  11.612  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.613  static int xencons_write(
  11.614 -    struct tty_struct *tty,
  11.615 -    const unsigned char *buf,
  11.616 -    int count)
  11.617 +	struct tty_struct *tty,
  11.618 +	const unsigned char *buf,
  11.619 +	int count)
  11.620  {
  11.621 -    int i;
  11.622 -    unsigned long flags;
  11.623 -
  11.624 -    if ( TTY_INDEX(tty) != 0 )
  11.625 -        return count;
  11.626 +	int i;
  11.627 +	unsigned long flags;
  11.628  
  11.629 -    spin_lock_irqsave(&xencons_lock, flags);
  11.630 -
  11.631 -    for ( i = 0; i < count; i++ )
  11.632 -        if ( !__xencons_put_char(buf[i]) )
  11.633 -            break;
  11.634 +	if (TTY_INDEX(tty) != 0)
  11.635 +		return count;
  11.636  
  11.637 -    if ( i != 0 )
  11.638 -        __xencons_tx_flush();
  11.639 +	spin_lock_irqsave(&xencons_lock, flags);
  11.640  
  11.641 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.642 +	for (i = 0; i < count; i++)
  11.643 +		if (!__xencons_put_char(buf[i]))
  11.644 +			break;
  11.645  
  11.646 -    return i;
  11.647 +	if (i != 0)
  11.648 +		__xencons_tx_flush();
  11.649 +
  11.650 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.651 +
  11.652 +	return i;
  11.653  }
  11.654  #else
  11.655  static int xencons_write(
  11.656 -    struct tty_struct *tty, 
  11.657 -    int from_user,
  11.658 -    const u_char *buf, 
  11.659 -    int count)
    11.660 +	struct tty_struct *tty,
  11.661 +	int from_user,
    11.662 +	const u_char *buf,
  11.663 +	int count)
  11.664  {
  11.665 -    int i;
  11.666 -    unsigned long flags;
  11.667 -
  11.668 -    if ( from_user && verify_area(VERIFY_READ, buf, count) )
  11.669 -        return -EINVAL;
  11.670 -
  11.671 -    if ( TTY_INDEX(tty) != 0 )
  11.672 -        return count;
  11.673 -
  11.674 -    spin_lock_irqsave(&xencons_lock, flags);
  11.675 +	int i;
  11.676 +	unsigned long flags;
  11.677  
  11.678 -    for ( i = 0; i < count; i++ )
  11.679 -    {
  11.680 -        char ch;
  11.681 -        if ( from_user )
  11.682 -            __get_user(ch, buf + i);
  11.683 -        else
  11.684 -            ch = buf[i];
  11.685 -        if ( !__xencons_put_char(ch) )
  11.686 -            break;
  11.687 -    }
  11.688 +	if (from_user && verify_area(VERIFY_READ, buf, count))
  11.689 +		return -EINVAL;
  11.690  
  11.691 -    if ( i != 0 )
  11.692 -        __xencons_tx_flush();
  11.693 +	if (TTY_INDEX(tty) != 0)
  11.694 +		return count;
  11.695  
  11.696 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.697 +	spin_lock_irqsave(&xencons_lock, flags);
  11.698  
  11.699 -    return i;
  11.700 +	for (i = 0; i < count; i++) {
  11.701 +		char ch;
  11.702 +		if (from_user)
  11.703 +			__get_user(ch, buf + i);
  11.704 +		else
  11.705 +			ch = buf[i];
  11.706 +		if (!__xencons_put_char(ch))
  11.707 +			break;
  11.708 +	}
  11.709 +
  11.710 +	if (i != 0)
  11.711 +		__xencons_tx_flush();
  11.712 +
  11.713 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.714 +
  11.715 +	return i;
  11.716  }
  11.717  #endif
  11.718  
  11.719  static void xencons_put_char(struct tty_struct *tty, u_char ch)
  11.720  {
  11.721 -    unsigned long flags;
  11.722 +	unsigned long flags;
  11.723  
  11.724 -    if ( TTY_INDEX(tty) != 0 )
  11.725 -        return;
  11.726 +	if (TTY_INDEX(tty) != 0)
  11.727 +		return;
  11.728  
  11.729 -    spin_lock_irqsave(&xencons_lock, flags);
  11.730 -    (void)__xencons_put_char(ch);
  11.731 -    spin_unlock_irqrestore(&xencons_lock, flags);
  11.732 +	spin_lock_irqsave(&xencons_lock, flags);
  11.733 +	(void)__xencons_put_char(ch);
  11.734 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.735  }
  11.736  
  11.737  static void xencons_flush_chars(struct tty_struct *tty)
  11.738  {
  11.739 -    unsigned long flags;
  11.740 +	unsigned long flags;
  11.741  
  11.742 -    if ( TTY_INDEX(tty) != 0 )
  11.743 -        return;
  11.744 +	if (TTY_INDEX(tty) != 0)
  11.745 +		return;
  11.746  
  11.747 -    spin_lock_irqsave(&xencons_lock, flags);
  11.748 -    __xencons_tx_flush();
  11.749 -    spin_unlock_irqrestore(&xencons_lock, flags);    
  11.750 +	spin_lock_irqsave(&xencons_lock, flags);
  11.751 +	__xencons_tx_flush();
    11.752 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.753  }
  11.754  
  11.755  static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
  11.756  {
  11.757 -    unsigned long orig_jiffies = jiffies;
  11.758 -
  11.759 -    if ( TTY_INDEX(tty) != 0 )
  11.760 -        return;
  11.761 +	unsigned long orig_jiffies = jiffies;
  11.762  
  11.763 -    while ( DRV(tty->driver)->chars_in_buffer(tty) )
  11.764 -    {
  11.765 -        set_current_state(TASK_INTERRUPTIBLE);
  11.766 -        schedule_timeout(1);
  11.767 -        if ( signal_pending(current) )
  11.768 -            break;
  11.769 -        if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
  11.770 -            break;
  11.771 -    }
  11.772 +	if (TTY_INDEX(tty) != 0)
  11.773 +		return;
  11.774 +
  11.775 +	while (DRV(tty->driver)->chars_in_buffer(tty))
  11.776 +	{
  11.777 +		set_current_state(TASK_INTERRUPTIBLE);
  11.778 +		schedule_timeout(1);
  11.779 +		if (signal_pending(current))
  11.780 +			break;
    11.781 +		if ((timeout != 0) &&
    11.782 +		    time_after(jiffies, orig_jiffies + timeout))
  11.783 +			break;
  11.784 +	}
  11.785      
  11.786 -    set_current_state(TASK_RUNNING);
  11.787 +	set_current_state(TASK_RUNNING);
  11.788  }
  11.789  
  11.790  static int xencons_open(struct tty_struct *tty, struct file *filp)
  11.791  {
  11.792 -    unsigned long flags;
  11.793 -
  11.794 -    if ( TTY_INDEX(tty) != 0 )
  11.795 -        return 0;
  11.796 +	unsigned long flags;
  11.797  
  11.798 -    spin_lock_irqsave(&xencons_lock, flags);
  11.799 -    tty->driver_data = NULL;
  11.800 -    if ( xencons_tty == NULL )
  11.801 -        xencons_tty = tty;
  11.802 -    __xencons_tx_flush();
  11.803 -    spin_unlock_irqrestore(&xencons_lock, flags);    
  11.804 +	if (TTY_INDEX(tty) != 0)
  11.805 +		return 0;
  11.806  
  11.807 -    return 0;
  11.808 +	spin_lock_irqsave(&xencons_lock, flags);
  11.809 +	tty->driver_data = NULL;
  11.810 +	if (xencons_tty == NULL)
  11.811 +		xencons_tty = tty;
  11.812 +	__xencons_tx_flush();
    11.813 +	spin_unlock_irqrestore(&xencons_lock, flags);
  11.814 +
  11.815 +	return 0;
  11.816  }
  11.817  
  11.818  static void xencons_close(struct tty_struct *tty, struct file *filp)
  11.819  {
  11.820 -    unsigned long flags;
  11.821 -
  11.822 -    if ( TTY_INDEX(tty) != 0 )
  11.823 -        return;
  11.824 +	unsigned long flags;
  11.825  
  11.826 -    if ( tty->count == 1 )
  11.827 -    {
  11.828 -        tty->closing = 1;
  11.829 -        tty_wait_until_sent(tty, 0);
  11.830 -        if ( DRV(tty->driver)->flush_buffer != NULL )
  11.831 -            DRV(tty->driver)->flush_buffer(tty);
  11.832 -        if ( tty->ldisc.flush_buffer != NULL )
  11.833 -            tty->ldisc.flush_buffer(tty);
  11.834 -        tty->closing = 0;
  11.835 -        spin_lock_irqsave(&xencons_lock, flags);
  11.836 -        xencons_tty = NULL;
  11.837 -        spin_unlock_irqrestore(&xencons_lock, flags);    
  11.838 -    }
  11.839 +	if (TTY_INDEX(tty) != 0)
  11.840 +		return;
  11.841 +
  11.842 +	if (tty->count == 1) {
  11.843 +		tty->closing = 1;
  11.844 +		tty_wait_until_sent(tty, 0);
  11.845 +		if (DRV(tty->driver)->flush_buffer != NULL)
  11.846 +			DRV(tty->driver)->flush_buffer(tty);
  11.847 +		if (tty->ldisc.flush_buffer != NULL)
  11.848 +			tty->ldisc.flush_buffer(tty);
  11.849 +		tty->closing = 0;
  11.850 +		spin_lock_irqsave(&xencons_lock, flags);
  11.851 +		xencons_tty = NULL;
    11.852 +		spin_unlock_irqrestore(&xencons_lock, flags);
  11.853 +	}
  11.854  }
  11.855  
  11.856  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.857  static struct tty_operations xencons_ops = {
  11.858 -    .open = xencons_open,
  11.859 -    .close = xencons_close,
  11.860 -    .write = xencons_write,
  11.861 -    .write_room = xencons_write_room,
  11.862 -    .put_char = xencons_put_char,
  11.863 -    .flush_chars = xencons_flush_chars,
  11.864 -    .chars_in_buffer = xencons_chars_in_buffer,
  11.865 -    .send_xchar = xencons_send_xchar,
  11.866 -    .flush_buffer = xencons_flush_buffer,
  11.867 -    .throttle = xencons_throttle,
  11.868 -    .unthrottle = xencons_unthrottle,
  11.869 -    .wait_until_sent = xencons_wait_until_sent,
  11.870 +	.open = xencons_open,
  11.871 +	.close = xencons_close,
  11.872 +	.write = xencons_write,
  11.873 +	.write_room = xencons_write_room,
  11.874 +	.put_char = xencons_put_char,
  11.875 +	.flush_chars = xencons_flush_chars,
  11.876 +	.chars_in_buffer = xencons_chars_in_buffer,
  11.877 +	.send_xchar = xencons_send_xchar,
  11.878 +	.flush_buffer = xencons_flush_buffer,
  11.879 +	.throttle = xencons_throttle,
  11.880 +	.unthrottle = xencons_unthrottle,
  11.881 +	.wait_until_sent = xencons_wait_until_sent,
  11.882  };
  11.883  
  11.884  #ifdef CONFIG_XEN_PRIVILEGED_GUEST
  11.885  static const char *xennullcon_startup(void)
  11.886  {
  11.887 -    return NULL;
  11.888 +	return NULL;
  11.889  }
  11.890  
  11.891  static int xennullcon_dummy(void)
  11.892  {
  11.893 -    return 0;
  11.894 +	return 0;
  11.895  }
  11.896  
  11.897  #define DUMMY (void *)xennullcon_dummy
  11.898 @@ -672,122 +665,128 @@ static int xennullcon_dummy(void)
  11.899   */
  11.900  
  11.901  const struct consw xennull_con = {
  11.902 -    .owner =		THIS_MODULE,
  11.903 -    .con_startup =	xennullcon_startup,
  11.904 -    .con_init =		DUMMY,
  11.905 -    .con_deinit =	DUMMY,
  11.906 -    .con_clear =	DUMMY,
  11.907 -    .con_putc =		DUMMY,
  11.908 -    .con_putcs =	DUMMY,
  11.909 -    .con_cursor =	DUMMY,
  11.910 -    .con_scroll =	DUMMY,
  11.911 -    .con_bmove =	DUMMY,
  11.912 -    .con_switch =	DUMMY,
  11.913 -    .con_blank =	DUMMY,
  11.914 -    .con_font_set =	DUMMY,
  11.915 -    .con_font_get =	DUMMY,
  11.916 -    .con_font_default =	DUMMY,
  11.917 -    .con_font_copy =	DUMMY,
  11.918 -    .con_set_palette =	DUMMY,
  11.919 -    .con_scrolldelta =	DUMMY,
   11.920 +	.owner =		THIS_MODULE,
   11.921 +	.con_startup =		xennullcon_startup,
   11.922 +	.con_init =		DUMMY,
   11.923 +	.con_deinit =		DUMMY,
   11.924 +	.con_clear =		DUMMY,
   11.925 +	.con_putc =		DUMMY,
   11.926 +	.con_putcs =		DUMMY,
   11.927 +	.con_cursor =		DUMMY,
   11.928 +	.con_scroll =		DUMMY,
   11.929 +	.con_bmove =		DUMMY,
   11.930 +	.con_switch =		DUMMY,
   11.931 +	.con_blank =		DUMMY,
   11.932 +	.con_font_set =		DUMMY,
   11.933 +	.con_font_get =		DUMMY,
   11.934 +	.con_font_default =	DUMMY,
   11.935 +	.con_font_copy =	DUMMY,
   11.936 +	.con_set_palette =	DUMMY,
   11.937 +	.con_scrolldelta =	DUMMY,
  11.938  };
  11.939  #endif
  11.940  #endif
  11.941  
  11.942  static int __init xencons_init(void)
  11.943  {
  11.944 -    int rc;
  11.945 -
  11.946 -    if ( xc_mode == XC_OFF )
  11.947 -        return 0;
  11.948 -
  11.949 -    xencons_ring_init();
  11.950 -
  11.951 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.952 -    xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
  11.953 -                                      1 : MAX_NR_CONSOLES);
  11.954 -    if ( xencons_driver == NULL )
  11.955 -        return -ENOMEM;
  11.956 -#else
  11.957 -    memset(&xencons_driver, 0, sizeof(struct tty_driver));
  11.958 -    xencons_driver.magic       = TTY_DRIVER_MAGIC;
  11.959 -    xencons_driver.refcount    = &xencons_refcount;
  11.960 -    xencons_driver.table       = xencons_table;
  11.961 -    xencons_driver.num         = (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
  11.962 -#endif
  11.963 +	int rc;
  11.964  
  11.965 -    DRV(xencons_driver)->major           = TTY_MAJOR;
  11.966 -    DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
  11.967 -    DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
  11.968 -    DRV(xencons_driver)->init_termios    = tty_std_termios;
  11.969 -    DRV(xencons_driver)->flags           = 
  11.970 -        TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
  11.971 -    DRV(xencons_driver)->termios         = xencons_termios;
  11.972 -    DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
  11.973 +	if (xc_mode == XC_OFF)
  11.974 +		return 0;
  11.975  
  11.976 -    if ( xc_mode == XC_SERIAL )
  11.977 -    {
  11.978 -        DRV(xencons_driver)->name        = "ttyS";
  11.979 -        DRV(xencons_driver)->minor_start = 64 + xc_num;
  11.980 -        DRV(xencons_driver)->name_base   = 0 + xc_num;
  11.981 -    }
  11.982 -    else
  11.983 -    {
  11.984 -        DRV(xencons_driver)->name        = "tty";
  11.985 -        DRV(xencons_driver)->minor_start = xc_num;
  11.986 -        DRV(xencons_driver)->name_base   = xc_num;
  11.987 -    }
  11.988 +	xencons_ring_init();
  11.989  
  11.990  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  11.991 -    tty_set_operations(xencons_driver, &xencons_ops);
   11.992 +	xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ?
  11.993 +					  1 : MAX_NR_CONSOLES);
  11.994 +	if (xencons_driver == NULL)
  11.995 +		return -ENOMEM;
  11.996  #else
  11.997 -    xencons_driver.open            = xencons_open;
  11.998 -    xencons_driver.close           = xencons_close;
  11.999 -    xencons_driver.write           = xencons_write;
 11.1000 -    xencons_driver.write_room      = xencons_write_room;
 11.1001 -    xencons_driver.put_char        = xencons_put_char;
 11.1002 -    xencons_driver.flush_chars     = xencons_flush_chars;
 11.1003 -    xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
 11.1004 -    xencons_driver.send_xchar      = xencons_send_xchar;
 11.1005 -    xencons_driver.flush_buffer    = xencons_flush_buffer;
 11.1006 -    xencons_driver.throttle        = xencons_throttle;
 11.1007 -    xencons_driver.unthrottle      = xencons_unthrottle;
 11.1008 -    xencons_driver.wait_until_sent = xencons_wait_until_sent;
 11.1009 +	memset(&xencons_driver, 0, sizeof(struct tty_driver));
 11.1010 +	xencons_driver.magic       = TTY_DRIVER_MAGIC;
 11.1011 +	xencons_driver.refcount    = &xencons_refcount;
 11.1012 +	xencons_driver.table       = xencons_table;
 11.1013 +	xencons_driver.num         =
 11.1014 +		(xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
 11.1015  #endif
 11.1016  
 11.1017 -    if ( (rc = tty_register_driver(DRV(xencons_driver))) != 0 )
 11.1018 -    {
 11.1019 -        printk("WARNING: Failed to register Xen virtual "
 11.1020 -               "console driver as '%s%d'\n",
 11.1021 -               DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
 11.1022 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 11.1023 -        put_tty_driver(xencons_driver);
 11.1024 -        xencons_driver = NULL;
 11.1025 -#endif
 11.1026 -        return rc;
 11.1027 -    }
 11.1028 +	DRV(xencons_driver)->major           = TTY_MAJOR;
 11.1029 +	DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
 11.1030 +	DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
 11.1031 +	DRV(xencons_driver)->init_termios    = tty_std_termios;
  11.1032 +	DRV(xencons_driver)->flags           =
 11.1033 +		TTY_DRIVER_REAL_RAW |
 11.1034 +		TTY_DRIVER_RESET_TERMIOS |
 11.1035 +		TTY_DRIVER_NO_DEVFS;
 11.1036 +	DRV(xencons_driver)->termios         = xencons_termios;
 11.1037 +	DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
 11.1038 +
 11.1039 +	if (xc_mode == XC_SERIAL)
 11.1040 +	{
 11.1041 +		DRV(xencons_driver)->name        = "ttyS";
 11.1042 +		DRV(xencons_driver)->minor_start = 64 + xc_num;
 11.1043 +		DRV(xencons_driver)->name_base   = 0 + xc_num;
 11.1044 +	} else {
 11.1045 +		DRV(xencons_driver)->name        = "tty";
 11.1046 +		DRV(xencons_driver)->minor_start = xc_num;
 11.1047 +		DRV(xencons_driver)->name_base   = xc_num;
 11.1048 +	}
 11.1049  
 11.1050  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 11.1051 -    tty_register_device(xencons_driver, 0, NULL);
 11.1052 +	tty_set_operations(xencons_driver, &xencons_ops);
 11.1053 +#else
 11.1054 +	xencons_driver.open            = xencons_open;
 11.1055 +	xencons_driver.close           = xencons_close;
 11.1056 +	xencons_driver.write           = xencons_write;
 11.1057 +	xencons_driver.write_room      = xencons_write_room;
 11.1058 +	xencons_driver.put_char        = xencons_put_char;
 11.1059 +	xencons_driver.flush_chars     = xencons_flush_chars;
 11.1060 +	xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
 11.1061 +	xencons_driver.send_xchar      = xencons_send_xchar;
 11.1062 +	xencons_driver.flush_buffer    = xencons_flush_buffer;
 11.1063 +	xencons_driver.throttle        = xencons_throttle;
 11.1064 +	xencons_driver.unthrottle      = xencons_unthrottle;
 11.1065 +	xencons_driver.wait_until_sent = xencons_wait_until_sent;
 11.1066  #endif
 11.1067  
 11.1068 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
 11.1069 -    {
 11.1070 -        xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
 11.1071 -        (void)request_irq(xencons_priv_irq,
 11.1072 -                          xencons_priv_interrupt, 0, "console", NULL);
 11.1073 -    }
 11.1074 -    else
 11.1075 -    {
 11.1076 -	
 11.1077 -	xencons_ring_register_receiver(xencons_rx);
 11.1078 -    }
 11.1079 +	if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
 11.1080 +		printk("WARNING: Failed to register Xen virtual "
 11.1081 +		       "console driver as '%s%d'\n",
 11.1082 +		       DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
 11.1083 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 11.1084 +		put_tty_driver(xencons_driver);
 11.1085 +		xencons_driver = NULL;
 11.1086 +#endif
 11.1087 +		return rc;
 11.1088 +	}
 11.1089  
 11.1090 -    printk("Xen virtual console successfully installed as %s%d\n",
 11.1091 -           DRV(xencons_driver)->name,
 11.1092 -           DRV(xencons_driver)->name_base );
 11.1093 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 11.1094 +	tty_register_device(xencons_driver, 0, NULL);
 11.1095 +#endif
 11.1096 +
 11.1097 +	if (xen_start_info->flags & SIF_INITDOMAIN) {
 11.1098 +		xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
 11.1099 +		(void)request_irq(xencons_priv_irq,
 11.1100 +				  xencons_priv_interrupt, 0, "console", NULL);
 11.1101 +	} else {
 11.1102 +		xencons_ring_register_receiver(xencons_rx);
 11.1103 +	}
 11.1104 +
 11.1105 +	printk("Xen virtual console successfully installed as %s%d\n",
 11.1106 +	       DRV(xencons_driver)->name,
  11.1107 +	       DRV(xencons_driver)->name_base);
 11.1108      
 11.1109 -    return 0;
 11.1110 +	return 0;
 11.1111  }
 11.1112  
 11.1113  module_init(xencons_init);
 11.1114 +
 11.1115 +/*
 11.1116 + * Local variables:
 11.1117 + *  c-file-style: "linux"
 11.1118 + *  indent-tabs-mode: t
 11.1119 + *  c-indent-level: 8
 11.1120 + *  c-basic-offset: 8
 11.1121 + *  tab-width: 8
 11.1122 + * End:
 11.1123 + */
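
One detail of __xencons_tx_flush() above deserves a note: each send is
clamped to the contiguous run ending at the physical end of the buffer,
and a wrapped remainder goes out on the next loop iteration. A
userspace sketch of that chunking, where emit() is an illustrative
stand-in for xencons_ring_send() or the dom0 hypercall path:

	#include <stdio.h>

	#define WBUF_SIZE 8
	#define WBUF_MASK(_i) ((_i) & (WBUF_SIZE - 1))

	static char wbuf[WBUF_SIZE];
	static unsigned int wc, wp;	/* free-running cursors */

	static int emit(const char *data, unsigned int len)
	{
		printf("send %u bytes: %.*s\n", len, (int)len, data);
		return (int)len;	/* pretend all bytes were accepted */
	}

	static void tx_flush(void)
	{
		unsigned int sz;

		while (wc != wp) {
			sz = wp - wc;
			if (sz > (WBUF_SIZE - WBUF_MASK(wc)))
				sz = WBUF_SIZE - WBUF_MASK(wc); /* clamp at wrap */
			wc += emit(&wbuf[WBUF_MASK(wc)], sz);
		}
	}

	int main(void)
	{
		const char *msg = "wrapped!";	/* 8 bytes, starts mid-buffer */
		unsigned int i;

		wc = wp = 6;
		for (i = 0; msg[i] != '\0'; i++)
			wbuf[WBUF_MASK(wp++)] = msg[i];
		tx_flush();	/* two sends: "wr", then "apped!" */
		return 0;
	}
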
    12.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Thu Sep 22 14:01:01 2005 +0100
    12.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Thu Sep 22 14:04:14 2005 +0100
    12.3 @@ -125,3 +125,13 @@ void xencons_resume(void)
    12.4  
    12.5  	(void)xencons_ring_init();
    12.6  }
    12.7 +
    12.8 +/*
    12.9 + * Local variables:
   12.10 + *  c-file-style: "linux"
   12.11 + *  indent-tabs-mode: t
   12.12 + *  c-indent-level: 8
   12.13 + *  c-basic-offset: 8
   12.14 + *  tab-width: 8
   12.15 + * End:
   12.16 + */
    13.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h	Thu Sep 22 14:01:01 2005 +0100
    13.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h	Thu Sep 22 14:04:14 2005 +0100
    13.3 @@ -3,12 +3,21 @@
    13.4  
    13.5  asmlinkage int xprintk(const char *fmt, ...);
    13.6  
    13.7 -
    13.8  int xencons_ring_init(void);
    13.9  int xencons_ring_send(const char *data, unsigned len);
   13.10  
   13.11 -typedef void (xencons_receiver_func)(char *buf, unsigned len, 
   13.12 -                                     struct pt_regs *regs);
   13.13 +typedef void (xencons_receiver_func)(
   13.14 +	char *buf, unsigned len, struct pt_regs *regs);
   13.15  void xencons_ring_register_receiver(xencons_receiver_func *f);
   13.16  
   13.17  #endif /* _XENCONS_RING_H */
   13.18 +
   13.19 +/*
   13.20 + * Local variables:
   13.21 + *  c-file-style: "linux"
   13.22 + *  indent-tabs-mode: t
   13.23 + *  c-indent-level: 8
   13.24 + *  c-basic-offset: 8
   13.25 + *  tab-width: 8
   13.26 + * End:
   13.27 + */
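
The typedef above is the hook the console front end uses: in the
non-privileged case, xencons_init() hands xencons_rx() to
xencons_ring_register_receiver(). A standalone sketch of the same
callback pattern (struct pt_regs is left opaque here; ring_deliver()
is an illustrative stand-in for the ring layer's interrupt path):

	#include <stdio.h>

	struct pt_regs;		/* opaque in this sketch */

	typedef void (xencons_receiver_func)(
		char *buf, unsigned len, struct pt_regs *regs);

	static xencons_receiver_func *receiver;

	static void ring_register_receiver(xencons_receiver_func *f)
	{
		receiver = f;
	}

	static void ring_deliver(char *buf, unsigned len)
	{
		if (receiver != NULL)	/* drop input until registered */
			receiver(buf, len, NULL);
	}

	static void demo_rx(char *buf, unsigned len, struct pt_regs *regs)
	{
		(void)regs;
		printf("rx %u bytes: %.*s\n", len, (int)len, buf);
	}

	int main(void)
	{
		char msg[] = "hello";

		ring_register_receiver(demo_rx);
		ring_deliver(msg, sizeof(msg) - 1);
		return 0;
	}
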
    14.1 --- a/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c	Thu Sep 22 14:01:01 2005 +0100
    14.2 +++ b/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c	Thu Sep 22 14:04:14 2005 +0100
    14.3 @@ -1,9 +1,9 @@
    14.4  /******************************************************************************
    14.5   * evtchn.c
    14.6   * 
    14.7 - * Xenolinux driver for receiving and demuxing event-channel signals.
    14.8 + * Driver for receiving and demuxing event-channel signals.
    14.9   * 
   14.10 - * Copyright (c) 2004, K A Fraser
   14.11 + * Copyright (c) 2004-2005, K A Fraser
   14.12   * Multi-process extensions Copyright (c) 2004, Steven Smith
   14.13   * 
   14.14   * This file may be distributed separately from the Linux kernel, or
   14.15 @@ -46,29 +46,18 @@
   14.16  #include <linux/init.h>
   14.17  #define XEN_EVTCHN_MASK_OPS
   14.18  #include <asm-xen/evtchn.h>
   14.19 -
   14.20 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   14.21 -#include <linux/devfs_fs_kernel.h>
   14.22 -#define OLD_DEVFS
   14.23 -#else
   14.24  #include <linux/gfp.h>
   14.25 -#endif
   14.26 -
   14.27 -#ifdef OLD_DEVFS
   14.28 -/* NB. This must be shared amongst drivers if more things go in /dev/xen */
   14.29 -static devfs_handle_t xen_dev_dir;
   14.30 -#endif
   14.31  
   14.32  struct per_user_data {
   14.33 -    /* Notification ring, accessed via /dev/xen/evtchn. */
   14.34 -#   define EVTCHN_RING_SIZE     2048  /* 2048 16-bit entries */
   14.35 -#   define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
   14.36 -    u16 *ring;
   14.37 -    unsigned int ring_cons, ring_prod, ring_overflow;
   14.38 +	/* Notification ring, accessed via /dev/xen/evtchn. */
   14.39 +#define EVTCHN_RING_SIZE     2048  /* 2048 16-bit entries */
   14.40 +#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
   14.41 +	u16 *ring;
   14.42 +	unsigned int ring_cons, ring_prod, ring_overflow;
   14.43  
   14.44 -    /* Processes wait on this queue when ring is empty. */
   14.45 -    wait_queue_head_t evtchn_wait;
   14.46 -    struct fasync_struct *evtchn_async_queue;
   14.47 +	/* Processes wait on this queue when ring is empty. */
   14.48 +	wait_queue_head_t evtchn_wait;
   14.49 +	struct fasync_struct *evtchn_async_queue;
   14.50  };
   14.51  
   14.52  /* Who's bound to each port? */
   14.53 @@ -77,356 +66,310 @@ static spinlock_t port_user_lock;
   14.54  
   14.55  void evtchn_device_upcall(int port)
   14.56  {
   14.57 -    struct per_user_data *u;
   14.58 -
   14.59 -    spin_lock(&port_user_lock);
   14.60 -
   14.61 -    mask_evtchn(port);
   14.62 -    clear_evtchn(port);
   14.63 +	struct per_user_data *u;
   14.64  
   14.65 -    if ( (u = port_user[port]) != NULL )
   14.66 -    {
   14.67 -        if ( (u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE )
   14.68 -        {
   14.69 -            u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
   14.70 -            if ( u->ring_cons == u->ring_prod++ )
   14.71 -            {
   14.72 -                wake_up_interruptible(&u->evtchn_wait);
   14.73 -                kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
   14.74 -            }
   14.75 -        }
   14.76 -        else
   14.77 -        {
   14.78 -            u->ring_overflow = 1;
   14.79 -        }
   14.80 -    }
   14.81 +	spin_lock(&port_user_lock);
   14.82  
   14.83 -    spin_unlock(&port_user_lock);
   14.84 +	mask_evtchn(port);
   14.85 +	clear_evtchn(port);
   14.86 +
   14.87 +	if ((u = port_user[port]) != NULL) {
   14.88 +		if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
   14.89 +			u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
   14.90 +			if (u->ring_cons == u->ring_prod++) {
   14.91 +				wake_up_interruptible(&u->evtchn_wait);
   14.92 +				kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
   14.93 +			}
   14.94 +		} else {
   14.95 +			u->ring_overflow = 1;
   14.96 +		}
   14.97 +	}
   14.98 +
   14.99 +	spin_unlock(&port_user_lock);
  14.100  }
  14.101  
  14.102  static ssize_t evtchn_read(struct file *file, char *buf,
  14.103                             size_t count, loff_t *ppos)
  14.104  {
  14.105 -    int rc;
  14.106 -    unsigned int c, p, bytes1 = 0, bytes2 = 0;
  14.107 -    DECLARE_WAITQUEUE(wait, current);
  14.108 -    struct per_user_data *u = file->private_data;
  14.109 -
  14.110 -    add_wait_queue(&u->evtchn_wait, &wait);
  14.111 -
  14.112 -    count &= ~1; /* even number of bytes */
  14.113 -
  14.114 -    if ( count == 0 )
  14.115 -    {
  14.116 -        rc = 0;
  14.117 -        goto out;
  14.118 -    }
  14.119 -
  14.120 -    if ( count > PAGE_SIZE )
  14.121 -        count = PAGE_SIZE;
  14.122 -
  14.123 -    for ( ; ; )
  14.124 -    {
  14.125 -        set_current_state(TASK_INTERRUPTIBLE);
  14.126 -
  14.127 -        if ( (c = u->ring_cons) != (p = u->ring_prod) )
  14.128 -            break;
  14.129 -
  14.130 -        if ( u->ring_overflow )
  14.131 -        {
  14.132 -            rc = -EFBIG;
  14.133 -            goto out;
  14.134 -        }
  14.135 -
  14.136 -        if ( file->f_flags & O_NONBLOCK )
  14.137 -        {
  14.138 -            rc = -EAGAIN;
  14.139 -            goto out;
  14.140 -        }
  14.141 +	int rc;
  14.142 +	unsigned int c, p, bytes1 = 0, bytes2 = 0;
  14.143 +	DECLARE_WAITQUEUE(wait, current);
  14.144 +	struct per_user_data *u = file->private_data;
  14.145  
  14.146 -        if ( signal_pending(current) )
  14.147 -        {
  14.148 -            rc = -ERESTARTSYS;
  14.149 -            goto out;
  14.150 -        }
  14.151 -
  14.152 -        schedule();
  14.153 -    }
  14.154 -
  14.155 -    /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
  14.156 -    if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 )
  14.157 -    {
  14.158 -        bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(u16);
  14.159 -        bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
  14.160 -    }
  14.161 -    else
  14.162 -    {
  14.163 -        bytes1 = (p - c) * sizeof(u16);
  14.164 -        bytes2 = 0;
  14.165 -    }
  14.166 +	add_wait_queue(&u->evtchn_wait, &wait);
  14.167  
  14.168 -    /* Truncate chunks according to caller's maximum byte count. */
  14.169 -    if ( bytes1 > count )
  14.170 -    {
  14.171 -        bytes1 = count;
  14.172 -        bytes2 = 0;
  14.173 -    }
  14.174 -    else if ( (bytes1 + bytes2) > count )
  14.175 -    {
  14.176 -        bytes2 = count - bytes1;
  14.177 -    }
  14.178 +	count &= ~1; /* even number of bytes */
  14.179  
  14.180 -    if ( copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
  14.181 -         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) )
  14.182 -    {
  14.183 -        rc = -EFAULT;
  14.184 -        goto out;
  14.185 -    }
  14.186 +	if (count == 0) {
  14.187 +		rc = 0;
  14.188 +		goto out;
  14.189 +	}
  14.190  
  14.191 -    u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
  14.192 +	if (count > PAGE_SIZE)
  14.193 +		count = PAGE_SIZE;
  14.194  
  14.195 -    rc = bytes1 + bytes2;
  14.196 +	for (;;) {
  14.197 +		set_current_state(TASK_INTERRUPTIBLE);
  14.198 +
  14.199 +		if ((c = u->ring_cons) != (p = u->ring_prod))
  14.200 +			break;
  14.201 +
  14.202 +		if (u->ring_overflow) {
  14.203 +			rc = -EFBIG;
  14.204 +			goto out;
  14.205 +		}
  14.206 +
  14.207 +		if (file->f_flags & O_NONBLOCK) {
  14.208 +			rc = -EAGAIN;
  14.209 +			goto out;
  14.210 +		}
  14.211 +
  14.212 +		if (signal_pending(current)) {
  14.213 +			rc = -ERESTARTSYS;
  14.214 +			goto out;
  14.215 +		}
  14.216 +
  14.217 +		schedule();
  14.218 +	}
  14.219 +
  14.220 +	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
  14.221 +	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
  14.222 +		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
  14.223 +			sizeof(u16);
  14.224 +		bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
  14.225 +	} else {
  14.226 +		bytes1 = (p - c) * sizeof(u16);
  14.227 +		bytes2 = 0;
  14.228 +	}
  14.229 +
  14.230 +	/* Truncate chunks according to caller's maximum byte count. */
  14.231 +	if (bytes1 > count) {
  14.232 +		bytes1 = count;
  14.233 +		bytes2 = 0;
  14.234 +	} else if ((bytes1 + bytes2) > count) {
  14.235 +		bytes2 = count - bytes1;
  14.236 +	}
  14.237 +
  14.238 +	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
  14.239 +	    ((bytes2 != 0) &&
  14.240 +	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) {
  14.241 +		rc = -EFAULT;
  14.242 +		goto out;
  14.243 +	}
  14.244 +
  14.245 +	u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
  14.246 +
  14.247 +	rc = bytes1 + bytes2;
  14.248  
  14.249   out:
  14.250 -    __set_current_state(TASK_RUNNING);
  14.251 -    remove_wait_queue(&u->evtchn_wait, &wait);
  14.252 -    return rc;
  14.253 +	__set_current_state(TASK_RUNNING);
  14.254 +	remove_wait_queue(&u->evtchn_wait, &wait);
  14.255 +	return rc;
  14.256  }
  14.257  
  14.258  static ssize_t evtchn_write(struct file *file, const char *buf,
  14.259                              size_t count, loff_t *ppos)
  14.260  {
  14.261 -    int  rc, i;
  14.262 -    u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
  14.263 -    struct per_user_data *u = file->private_data;
  14.264 -
  14.265 -    if ( kbuf == NULL )
  14.266 -        return -ENOMEM;
  14.267 -
  14.268 -    count &= ~1; /* even number of bytes */
  14.269 -
  14.270 -    if ( count == 0 )
  14.271 -    {
  14.272 -        rc = 0;
  14.273 -        goto out;
  14.274 -    }
  14.275 +	int  rc, i;
  14.276 +	u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
  14.277 +	struct per_user_data *u = file->private_data;
  14.278  
  14.279 -    if ( count > PAGE_SIZE )
  14.280 -        count = PAGE_SIZE;
  14.281 +	if (kbuf == NULL)
  14.282 +		return -ENOMEM;
  14.283  
  14.284 -    if ( copy_from_user(kbuf, buf, count) != 0 )
  14.285 -    {
  14.286 -        rc = -EFAULT;
  14.287 -        goto out;
  14.288 -    }
  14.289 +	count &= ~1; /* even number of bytes */
  14.290  
  14.291 -    spin_lock_irq(&port_user_lock);
  14.292 -    for ( i = 0; i < (count/2); i++ )
  14.293 -        if ( (kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u) )
  14.294 -            unmask_evtchn(kbuf[i]);
  14.295 -    spin_unlock_irq(&port_user_lock);
  14.296 +	if (count == 0) {
  14.297 +		rc = 0;
  14.298 +		goto out;
  14.299 +	}
  14.300  
  14.301 -    rc = count;
  14.302 +	if (count > PAGE_SIZE)
  14.303 +		count = PAGE_SIZE;
  14.304 +
  14.305 +	if (copy_from_user(kbuf, buf, count) != 0) {
  14.306 +		rc = -EFAULT;
  14.307 +		goto out;
  14.308 +	}
  14.309 +
  14.310 +	spin_lock_irq(&port_user_lock);
  14.311 +	for (i = 0; i < (count/2); i++)
  14.312 +		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
  14.313 +			unmask_evtchn(kbuf[i]);
  14.314 +	spin_unlock_irq(&port_user_lock);
  14.315 +
  14.316 +	rc = count;
  14.317  
  14.318   out:
  14.319 -    free_page((unsigned long)kbuf);
  14.320 -    return rc;
  14.321 +	free_page((unsigned long)kbuf);
  14.322 +	return rc;
  14.323  }
  14.324  
  14.325  static int evtchn_ioctl(struct inode *inode, struct file *file,
  14.326                          unsigned int cmd, unsigned long arg)
  14.327  {
  14.328 -    int rc = 0;
  14.329 -    struct per_user_data *u = file->private_data;
  14.330 -
  14.331 -    spin_lock_irq(&port_user_lock);
  14.332 -    
  14.333 -    switch ( cmd )
  14.334 -    {
  14.335 -    case EVTCHN_RESET:
  14.336 -        /* Initialise the ring to empty. Clear errors. */
  14.337 -        u->ring_cons = u->ring_prod = u->ring_overflow = 0;
  14.338 -        break;
  14.339 +	int rc = 0;
  14.340 +	struct per_user_data *u = file->private_data;
  14.341  
  14.342 -    case EVTCHN_BIND:
  14.343 -        if ( arg >= NR_EVENT_CHANNELS )
  14.344 -        {
  14.345 -            rc = -EINVAL;
  14.346 -        }
  14.347 -        else if ( port_user[arg] != NULL )
  14.348 -        {
  14.349 -            rc = -EISCONN;
  14.350 -        }
  14.351 -        else
  14.352 -        {
  14.353 -            port_user[arg] = u;
  14.354 -            unmask_evtchn(arg);
  14.355 -        }
  14.356 -        break;
  14.357 +	spin_lock_irq(&port_user_lock);
  14.358 +    
  14.359 +	switch (cmd) {
  14.360 +	case EVTCHN_RESET:
  14.361 +		/* Initialise the ring to empty. Clear errors. */
  14.362 +		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
  14.363 +		break;
  14.364  
  14.365 -    case EVTCHN_UNBIND:
  14.366 -        if ( arg >= NR_EVENT_CHANNELS )
  14.367 -        {
  14.368 -            rc = -EINVAL;
  14.369 -        }
  14.370 -        else if ( port_user[arg] != u )
  14.371 -        {
  14.372 -            rc = -ENOTCONN;
  14.373 -        }
  14.374 -        else
  14.375 -        {
  14.376 -            port_user[arg] = NULL;
  14.377 -            mask_evtchn(arg);
  14.378 -        }
  14.379 -        break;
  14.380 +	case EVTCHN_BIND:
  14.381 +		if (arg >= NR_EVENT_CHANNELS) {
  14.382 +			rc = -EINVAL;
  14.383 +		} else if (port_user[arg] != NULL) {
  14.384 +			rc = -EISCONN;
  14.385 +		} else {
  14.386 +			port_user[arg] = u;
  14.387 +			unmask_evtchn(arg);
  14.388 +		}
  14.389 +		break;
  14.390  
  14.391 -    default:
  14.392 -        rc = -ENOSYS;
  14.393 -        break;
  14.394 -    }
  14.395 +	case EVTCHN_UNBIND:
  14.396 +		if (arg >= NR_EVENT_CHANNELS) {
  14.397 +			rc = -EINVAL;
  14.398 +		} else if (port_user[arg] != u) {
  14.399 +			rc = -ENOTCONN;
  14.400 +		} else {
  14.401 +			port_user[arg] = NULL;
  14.402 +			mask_evtchn(arg);
  14.403 +		}
  14.404 +		break;
  14.405  
  14.406 -    spin_unlock_irq(&port_user_lock);   
  14.407 +	default:
  14.408 +		rc = -ENOSYS;
  14.409 +		break;
  14.410 +	}
  14.411  
  14.412 -    return rc;
   14.413 +	spin_unlock_irq(&port_user_lock);
  14.414 +
  14.415 +	return rc;
  14.416  }
  14.417  
  14.418  static unsigned int evtchn_poll(struct file *file, poll_table *wait)
  14.419  {
  14.420 -    unsigned int mask = POLLOUT | POLLWRNORM;
  14.421 -    struct per_user_data *u = file->private_data;
  14.422 +	unsigned int mask = POLLOUT | POLLWRNORM;
  14.423 +	struct per_user_data *u = file->private_data;
  14.424  
  14.425 -    poll_wait(file, &u->evtchn_wait, wait);
  14.426 -    if ( u->ring_cons != u->ring_prod )
  14.427 -        mask |= POLLIN | POLLRDNORM;
  14.428 -    if ( u->ring_overflow )
  14.429 -        mask = POLLERR;
  14.430 -    return mask;
  14.431 +	poll_wait(file, &u->evtchn_wait, wait);
  14.432 +	if (u->ring_cons != u->ring_prod)
  14.433 +		mask |= POLLIN | POLLRDNORM;
  14.434 +	if (u->ring_overflow)
  14.435 +		mask = POLLERR;
  14.436 +	return mask;
  14.437  }
  14.438  
  14.439  static int evtchn_fasync(int fd, struct file *filp, int on)
  14.440  {
  14.441 -    struct per_user_data *u = filp->private_data;
  14.442 -    return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
  14.443 +	struct per_user_data *u = filp->private_data;
  14.444 +	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
  14.445  }
  14.446  
  14.447  static int evtchn_open(struct inode *inode, struct file *filp)
  14.448  {
  14.449 -    struct per_user_data *u;
  14.450 -
  14.451 -    if ( (u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL )
  14.452 -        return -ENOMEM;
  14.453 -
  14.454 -    memset(u, 0, sizeof(*u));
  14.455 -    init_waitqueue_head(&u->evtchn_wait);
  14.456 +	struct per_user_data *u;
  14.457  
  14.458 -    if ( (u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
  14.459 -    {
  14.460 -        kfree(u);
  14.461 -        return -ENOMEM;
  14.462 -    }
  14.463 +	if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
  14.464 +		return -ENOMEM;
  14.465  
  14.466 -    filp->private_data = u;
  14.467 +	memset(u, 0, sizeof(*u));
  14.468 +	init_waitqueue_head(&u->evtchn_wait);
  14.469  
  14.470 -    return 0;
  14.471 +	if ((u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL)
  14.472 +	{
  14.473 +		kfree(u);
  14.474 +		return -ENOMEM;
  14.475 +	}
  14.476 +
  14.477 +	filp->private_data = u;
  14.478 +
  14.479 +	return 0;
  14.480  }
  14.481  
  14.482  static int evtchn_release(struct inode *inode, struct file *filp)
  14.483  {
  14.484 -    int i;
  14.485 -    struct per_user_data *u = filp->private_data;
  14.486 -
  14.487 -    spin_lock_irq(&port_user_lock);
  14.488 -
  14.489 -    free_page((unsigned long)u->ring);
  14.490 +	int i;
  14.491 +	struct per_user_data *u = filp->private_data;
  14.492  
  14.493 -    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
  14.494 -    {
  14.495 -        if ( port_user[i] == u )
  14.496 -        {
  14.497 -            port_user[i] = NULL;
  14.498 -            mask_evtchn(i);
  14.499 -        }
  14.500 -    }
  14.501 +	spin_lock_irq(&port_user_lock);
  14.502  
  14.503 -    spin_unlock_irq(&port_user_lock);
  14.504 +	free_page((unsigned long)u->ring);
  14.505  
  14.506 -    kfree(u);
  14.507 +	for (i = 0; i < NR_EVENT_CHANNELS; i++)
  14.508 +	{
  14.509 +		if (port_user[i] == u)
  14.510 +		{
  14.511 +			port_user[i] = NULL;
  14.512 +			mask_evtchn(i);
  14.513 +		}
  14.514 +	}
  14.515  
  14.516 -    return 0;
  14.517 +	spin_unlock_irq(&port_user_lock);
  14.518 +
  14.519 +	kfree(u);
  14.520 +
  14.521 +	return 0;
  14.522  }
  14.523  
  14.524  static struct file_operations evtchn_fops = {
  14.525 -    .owner   = THIS_MODULE,
  14.526 -    .read    = evtchn_read,
  14.527 -    .write   = evtchn_write,
  14.528 -    .ioctl   = evtchn_ioctl,
  14.529 -    .poll    = evtchn_poll,
  14.530 -    .fasync  = evtchn_fasync,
  14.531 -    .open    = evtchn_open,
  14.532 -    .release = evtchn_release,
  14.533 +	.owner   = THIS_MODULE,
  14.534 +	.read    = evtchn_read,
  14.535 +	.write   = evtchn_write,
  14.536 +	.ioctl   = evtchn_ioctl,
  14.537 +	.poll    = evtchn_poll,
  14.538 +	.fasync  = evtchn_fasync,
  14.539 +	.open    = evtchn_open,
  14.540 +	.release = evtchn_release,
  14.541  };
  14.542  
  14.543  static struct miscdevice evtchn_miscdev = {
  14.544 -    .minor        = EVTCHN_MINOR,
  14.545 -    .name         = "evtchn",
  14.546 -    .fops         = &evtchn_fops,
  14.547 +	.minor        = EVTCHN_MINOR,
  14.548 +	.name         = "evtchn",
  14.549 +	.fops         = &evtchn_fops,
  14.550  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  14.551 -    .devfs_name   = "misc/evtchn",
  14.552 +	.devfs_name   = "misc/evtchn",
  14.553  #endif
  14.554  };
  14.555  
  14.556  static int __init evtchn_init(void)
  14.557  {
  14.558 -#ifdef OLD_DEVFS
  14.559 -    devfs_handle_t symlink_handle;
  14.560 -    int            pos;
  14.561 -    char           link_dest[64];
  14.562 -#endif
  14.563 -    int err;
  14.564 -
  14.565 -    spin_lock_init(&port_user_lock);
  14.566 -    memset(port_user, 0, sizeof(port_user));
  14.567 -
  14.568 -    /* (DEVFS) create '/dev/misc/evtchn'. */
  14.569 -    err = misc_register(&evtchn_miscdev);
  14.570 -    if ( err != 0 )
  14.571 -    {
  14.572 -        printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
  14.573 -        return err;
  14.574 -    }
  14.575 -
  14.576 -#ifdef OLD_DEVFS
  14.577 -    /* (DEVFS) create directory '/dev/xen'. */
  14.578 -    xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
  14.579 +	int err;
  14.580  
  14.581 -    /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
  14.582 -    pos = devfs_generate_path(evtchn_miscdev.devfs_handle, 
  14.583 -                              &link_dest[3], 
  14.584 -                              sizeof(link_dest) - 3);
  14.585 -    if ( pos >= 0 )
  14.586 -        strncpy(&link_dest[pos], "../", 3);
  14.587 +	spin_lock_init(&port_user_lock);
  14.588 +	memset(port_user, 0, sizeof(port_user));
  14.589  
  14.590 -    /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
  14.591 -    (void)devfs_mk_symlink(xen_dev_dir, 
  14.592 -                           "evtchn", 
  14.593 -                           DEVFS_FL_DEFAULT, 
  14.594 -                           &link_dest[pos],
  14.595 -                           &symlink_handle, 
  14.596 -                           NULL);
  14.597 +	/* (DEVFS) create '/dev/misc/evtchn'. */
  14.598 +	err = misc_register(&evtchn_miscdev);
  14.599 +	if (err != 0)
  14.600 +	{
  14.601 +		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
  14.602 +		return err;
  14.603 +	}
  14.604  
  14.605 -    /* (DEVFS) automatically destroy the symlink with its destination. */
  14.606 -    devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
  14.607 -#endif
  14.608 +	printk("Event-channel device installed.\n");
  14.609  
  14.610 -    printk("Event-channel device installed.\n");
  14.611 -
  14.612 -    return 0;
  14.613 +	return 0;
  14.614  }
  14.615  
  14.616  static void evtchn_cleanup(void)
  14.617  {
  14.618 -    misc_deregister(&evtchn_miscdev);
  14.619 +	misc_deregister(&evtchn_miscdev);
  14.620  }
  14.621  
  14.622  module_init(evtchn_init);
  14.623  module_exit(evtchn_cleanup);
  14.624 +
  14.625 +/*
  14.626 + * Local variables:
  14.627 + *  c-file-style: "linux"
  14.628 + *  indent-tabs-mode: t
  14.629 + *  c-indent-level: 8
  14.630 + *  c-basic-offset: 8
  14.631 + *  tab-width: 8
  14.632 + * End:
  14.633 + */
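
For context: the reindented evtchn device above hands userspace a queue of
pending event-channel ports. evtchn_open() allocates one page per opener as a
ring of u16 port numbers, read() dequeues pending ports, and write() passes
ports back to the driver to re-enable them. A minimal consumer sketch under
just those assumptions — the device path follows the devfs name registered
above, and the bind ioctl is omitted because its numbering comes from a header
not shown in this changeset:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint16_t ports[64];	/* one u16 per port, as in u->ring above */
		ssize_t n;
		int i;
		int fd = open("/dev/misc/evtchn", O_RDWR); /* path assumed from devfs_name */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		for (;;) {
			/* Blocks until a bound port fires, then returns pending ports. */
			n = read(fd, ports, sizeof(ports));
			if (n <= 0)
				break;
			for (i = 0; i < (int)(n / sizeof(uint16_t)); i++)
				printf("event on port %u\n", (unsigned)ports[i]);
			/* Handing the ports back re-enables them for the next event. */
			if (write(fd, ports, n) != n)
				break;
		}
		close(fd);
		return 0;
	}

The 16-bit port framing matches the u16 ring page allocated in evtchn_open().
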
    15.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Thu Sep 22 14:01:01 2005 +0100
    15.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Thu Sep 22 14:04:14 2005 +0100
    15.3 @@ -41,232 +41,253 @@ static struct proc_dir_entry *privcmd_in
    15.4  static int privcmd_ioctl(struct inode *inode, struct file *file,
    15.5                           unsigned int cmd, unsigned long data)
    15.6  {
    15.7 -    int ret = -ENOSYS;
    15.8 +	int ret = -ENOSYS;
    15.9  
   15.10 -    switch ( cmd )
   15.11 -    {
   15.12 -    case IOCTL_PRIVCMD_HYPERCALL:
   15.13 -    {
   15.14 -        privcmd_hypercall_t hypercall;
   15.15 +	switch (cmd) {
   15.16 +	case IOCTL_PRIVCMD_HYPERCALL: {
   15.17 +		privcmd_hypercall_t hypercall;
   15.18    
   15.19 -        if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
   15.20 -            return -EFAULT;
   15.21 +		if (copy_from_user(&hypercall, (void *)data,
   15.22 +				   sizeof(hypercall)))
   15.23 +			return -EFAULT;
   15.24  
   15.25  #if defined(__i386__)
   15.26 -        __asm__ __volatile__ (
   15.27 -            "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
   15.28 -            "movl  4(%%eax),%%ebx ;"
   15.29 -            "movl  8(%%eax),%%ecx ;"
   15.30 -            "movl 12(%%eax),%%edx ;"
   15.31 -            "movl 16(%%eax),%%esi ;"
   15.32 -            "movl 20(%%eax),%%edi ;"
   15.33 -            "movl   (%%eax),%%eax ;"
   15.34 -            TRAP_INSTR "; "
   15.35 -            "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
   15.36 -            : "=a" (ret) : "0" (&hypercall) : "memory" );
   15.37 +		__asm__ __volatile__ (
   15.38 +			"pushl %%ebx; pushl %%ecx; pushl %%edx; "
   15.39 +			"pushl %%esi; pushl %%edi; "
   15.40 +			"movl  4(%%eax),%%ebx ;"
   15.41 +			"movl  8(%%eax),%%ecx ;"
   15.42 +			"movl 12(%%eax),%%edx ;"
   15.43 +			"movl 16(%%eax),%%esi ;"
   15.44 +			"movl 20(%%eax),%%edi ;"
   15.45 +			"movl   (%%eax),%%eax ;"
   15.46 +			TRAP_INSTR "; "
   15.47 +			"popl %%edi; popl %%esi; popl %%edx; "
   15.48 +			"popl %%ecx; popl %%ebx"
   15.49 +			: "=a" (ret) : "0" (&hypercall) : "memory" );
   15.50  #elif defined (__x86_64__)
   15.51 -        {
   15.52 -            long ign1, ign2, ign3;
   15.53 -            __asm__ __volatile__ (
   15.54 -                "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
   15.55 -                : "=a" (ret), "=D" (ign1), "=S" (ign2), "=d" (ign3)
   15.56 -                : "0" ((unsigned long)hypercall.op), 
   15.57 -                "1" ((unsigned long)hypercall.arg[0]), 
   15.58 -                "2" ((unsigned long)hypercall.arg[1]),
   15.59 -                "3" ((unsigned long)hypercall.arg[2]), 
   15.60 -                "g" ((unsigned long)hypercall.arg[3]),
   15.61 -                "g" ((unsigned long)hypercall.arg[4])
   15.62 -                : "r11","rcx","r8","r10","memory");
   15.63 -        }
   15.64 +		{
   15.65 +			long ign1, ign2, ign3;
   15.66 +			__asm__ __volatile__ (
   15.67 +				"movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
   15.68 +				: "=a" (ret), "=D" (ign1),
   15.69 +				  "=S" (ign2), "=d" (ign3)
   15.70 +				: "0" ((unsigned long)hypercall.op), 
   15.71 +				"1" ((unsigned long)hypercall.arg[0]), 
   15.72 +				"2" ((unsigned long)hypercall.arg[1]),
   15.73 +				"3" ((unsigned long)hypercall.arg[2]), 
   15.74 +				"g" ((unsigned long)hypercall.arg[3]),
   15.75 +				"g" ((unsigned long)hypercall.arg[4])
   15.76 +				: "r11","rcx","r8","r10","memory");
   15.77 +		}
   15.78  #elif defined (__ia64__)
   15.79 -       __asm__ __volatile__ (
   15.80 -           ";; mov r14=%2; mov r15=%3; mov r16=%4; mov r17=%5; mov r18=%6;"
   15.81 -           "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
   15.82 -           : "=r" (ret)
   15.83 -           : "r" (hypercall.op),
   15.84 -             "r" (hypercall.arg[0]),
   15.85 -             "r" (hypercall.arg[1]),
   15.86 -             "r" (hypercall.arg[2]),
   15.87 -             "r" (hypercall.arg[3]),
   15.88 -             "r" (hypercall.arg[4])
   15.89 -           : "r14","r15","r16","r17","r18","r2","r8","memory");
   15.90 +		__asm__ __volatile__ (
   15.91 +			";; mov r14=%2; mov r15=%3; "
   15.92 +			"mov r16=%4; mov r17=%5; mov r18=%6;"
   15.93 +			"mov r2=%1; break 0x1000;; mov %0=r8 ;;"
   15.94 +			: "=r" (ret)
   15.95 +			: "r" (hypercall.op),
   15.96 +			"r" (hypercall.arg[0]),
   15.97 +			"r" (hypercall.arg[1]),
   15.98 +			"r" (hypercall.arg[2]),
   15.99 +			"r" (hypercall.arg[3]),
  15.100 +			"r" (hypercall.arg[4])
  15.101 +			: "r14","r15","r16","r17","r18","r2","r8","memory");
  15.102  #endif
  15.103 -    }
  15.104 -    break;
  15.105 +	}
  15.106 +	break;
  15.107  
  15.108  #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
  15.109 -    case IOCTL_PRIVCMD_MMAP:
  15.110 -    {
  15.111 +	case IOCTL_PRIVCMD_MMAP: {
  15.112  #define PRIVCMD_MMAP_SZ 32
  15.113 -        privcmd_mmap_t mmapcmd;
  15.114 -        privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
  15.115 -        int i, rc;
  15.116 -
  15.117 -        if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
  15.118 -            return -EFAULT;
  15.119 -
  15.120 -        p = mmapcmd.entry;
  15.121 -
  15.122 -        for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
  15.123 -        {
  15.124 -            int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
  15.125 -                PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
  15.126 -
  15.127 -
  15.128 -            if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
  15.129 -                return -EFAULT;
  15.130 -     
  15.131 -            for ( j = 0; j < n; j++ )
  15.132 -            {
  15.133 -                struct vm_area_struct *vma = 
  15.134 -                    find_vma( current->mm, msg[j].va );
  15.135 -
  15.136 -                if ( !vma )
  15.137 -                    return -EINVAL;
  15.138 -
  15.139 -                if ( msg[j].va > PAGE_OFFSET )
  15.140 -                    return -EINVAL;
  15.141 -
  15.142 -                if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
  15.143 -                    return -EINVAL;
  15.144 -
  15.145 -                if ( (rc = direct_remap_pfn_range(vma,
  15.146 -                                                  msg[j].va&PAGE_MASK, 
  15.147 -                                                  msg[j].mfn, 
  15.148 -                                                  msg[j].npages<<PAGE_SHIFT, 
  15.149 -                                                  vma->vm_page_prot,
  15.150 -                                                  mmapcmd.dom)) < 0 )
  15.151 -                    return rc;
  15.152 -            }
  15.153 -        }
  15.154 -        ret = 0;
  15.155 -    }
  15.156 -    break;
  15.157 +		privcmd_mmap_t mmapcmd;
  15.158 +		privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
  15.159 +		int i, rc;
  15.160  
  15.161 -    case IOCTL_PRIVCMD_MMAPBATCH:
  15.162 -    {
  15.163 -        mmu_update_t u;
  15.164 -        privcmd_mmapbatch_t m;
  15.165 -        struct vm_area_struct *vma = NULL;
  15.166 -        unsigned long *p, addr;
  15.167 -        unsigned long mfn, ptep;
  15.168 -        int i;
  15.169 -
  15.170 -        if ( copy_from_user(&m, (void *)data, sizeof(m)) )
  15.171 -        { ret = -EFAULT; goto batch_err; }
  15.172 -
  15.173 -        vma = find_vma( current->mm, m.addr );
  15.174 -
  15.175 -        if ( !vma )
  15.176 -        { ret = -EINVAL; goto batch_err; }
  15.177 -
  15.178 -        if ( m.addr > PAGE_OFFSET )
  15.179 -        { ret = -EFAULT; goto batch_err; }
  15.180 -
  15.181 -        if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
  15.182 -        { ret = -EFAULT; goto batch_err; }
  15.183 +		if (copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)))
  15.184 +			return -EFAULT;
  15.185  
  15.186 -        p = m.arr;
  15.187 -        addr = m.addr;
  15.188 -        for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
  15.189 -        {
  15.190 -            if ( get_user(mfn, p) )
  15.191 -                return -EFAULT;
  15.192 -
  15.193 -            ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
  15.194 -            if (ret)
  15.195 -                goto batch_err;
  15.196 -
  15.197 -            u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
  15.198 -            u.ptr = ptep;
  15.199 +		p = mmapcmd.entry;
  15.200  
  15.201 -            if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
  15.202 -                put_user(0xF0000000 | mfn, p);
  15.203 -        }
  15.204 -
  15.205 -        ret = 0;
  15.206 -        break;
  15.207 +		for (i = 0; i < mmapcmd.num;
  15.208 +		     i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) {
  15.209 +			int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
  15.210 +				PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
  15.211  
  15.212 -    batch_err:
  15.213 -        printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n", 
  15.214 -               ret, vma, m.addr, m.num, m.arr,
  15.215 -               vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
  15.216 -        break;
  15.217 -    }
  15.218 -    break;
  15.219 +			if (copy_from_user(&msg, p,
  15.220 +					   n*sizeof(privcmd_mmap_entry_t)))
  15.221 +				return -EFAULT;
  15.222 +     
  15.223 +			for (j = 0; j < n; j++) {
  15.224 +				struct vm_area_struct *vma = 
  15.225 +					find_vma( current->mm, msg[j].va );
  15.226 +
  15.227 +				if (!vma)
  15.228 +					return -EINVAL;
  15.229 +
  15.230 +				if (msg[j].va > PAGE_OFFSET)
  15.231 +					return -EINVAL;
  15.232 +
  15.233 +				if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
  15.234 +				    > vma->vm_end )
  15.235 +					return -EINVAL;
  15.236 +
  15.237 +				if ((rc = direct_remap_pfn_range(
  15.238 +					vma,
  15.239 +					msg[j].va&PAGE_MASK, 
  15.240 +					msg[j].mfn, 
  15.241 +					msg[j].npages<<PAGE_SHIFT, 
  15.242 +					vma->vm_page_prot,
  15.243 +					mmapcmd.dom)) < 0)
  15.244 +					return rc;
  15.245 +			}
  15.246 +		}
  15.247 +		ret = 0;
  15.248 +	}
  15.249 +	break;
  15.250 +
  15.251 +	case IOCTL_PRIVCMD_MMAPBATCH: {
  15.252 +		mmu_update_t u;
  15.253 +		privcmd_mmapbatch_t m;
  15.254 +		struct vm_area_struct *vma = NULL;
  15.255 +		unsigned long *p, addr;
  15.256 +		unsigned long mfn, ptep;
  15.257 +		int i;
  15.258 +
  15.259 +		if (copy_from_user(&m, (void *)data, sizeof(m))) {
  15.260 +			ret = -EFAULT;
  15.261 +			goto batch_err;
  15.262 +		}
  15.263 +
  15.264 +		vma = find_vma( current->mm, m.addr );
  15.265 +		if (!vma) {
  15.266 +			ret = -EINVAL;
  15.267 +			goto batch_err;
  15.268 +		}
  15.269 +
  15.270 +		if (m.addr > PAGE_OFFSET) {
  15.271 +			ret = -EFAULT;
  15.272 +			goto batch_err;
  15.273 +		}
  15.274 +
  15.275 +		if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) {
  15.276 +			ret = -EFAULT;
  15.277 +			goto batch_err;
  15.278 +		}
  15.279 +
  15.280 +		p = m.arr;
  15.281 +		addr = m.addr;
  15.282 +		for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
  15.283 +			if (get_user(mfn, p))
  15.284 +				return -EFAULT;
  15.285 +
  15.286 +			ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
  15.287 +			if (ret)
  15.288 +				goto batch_err;
  15.289 +
  15.290 +			u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
  15.291 +			u.ptr = ptep;
  15.292 +
  15.293 +			if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
  15.294 +				put_user(0xF0000000 | mfn, p);
  15.295 +		}
  15.296 +
  15.297 +		ret = 0;
  15.298 +		break;
  15.299 +
  15.300 +	batch_err:
  15.301 +		printk("batch_err ret=%d vma=%p addr=%lx "
  15.302 +		       "num=%d arr=%p %lx-%lx\n", 
  15.303 +		       ret, vma, m.addr, m.num, m.arr,
  15.304 +		       vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
  15.305 +		break;
  15.306 +	}
  15.307 +	break;
  15.308  #endif
  15.309  
  15.310 -    case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
  15.311 -    {
  15.312 -        unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
  15.313 -        pgd_t *pgd = pgd_offset_k(m2pv);
  15.314 -        pud_t *pud = pud_offset(pgd, m2pv);
  15.315 -        pmd_t *pmd = pmd_offset(pud, m2pv);
  15.316 -        unsigned long m2p_start_mfn = (*(unsigned long *)pmd) >> PAGE_SHIFT; 
  15.317 -        ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0;
  15.318 -    }
  15.319 -    break;
  15.320 -
  15.321 -    case IOCTL_PRIVCMD_INITDOMAIN_STORE:
  15.322 -    {
  15.323 -        extern int do_xenbus_probe(void*);
  15.324 -        unsigned long page;
  15.325 -
  15.326 -        if (xen_start_info->store_evtchn != 0) {
  15.327 -            ret = xen_start_info->store_mfn;
  15.328 -            break;
  15.329 -        }
  15.330 +	case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN: {
  15.331 +		unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
  15.332 +		pgd_t *pgd = pgd_offset_k(m2pv);
  15.333 +		pud_t *pud = pud_offset(pgd, m2pv);
  15.334 +		pmd_t *pmd = pmd_offset(pud, m2pv);
  15.335 +		unsigned long m2p_start_mfn =
  15.336 +			(*(unsigned long *)pmd) >> PAGE_SHIFT; 
  15.337 +		ret = put_user(m2p_start_mfn, (unsigned long *)data) ?
  15.338 +			-EFAULT: 0;
  15.339 +	}
  15.340 +	break;
  15.341  
  15.342 -        /* Allocate page. */
  15.343 -        page = get_zeroed_page(GFP_KERNEL);
  15.344 -        if (!page) {
  15.345 -            ret = -ENOMEM;
  15.346 -            break;
  15.347 -        }
  15.348 -
  15.349 -        /* We don't refcnt properly, so set reserved on page.
  15.350 -         * (this allocation is permanent) */
  15.351 -        SetPageReserved(virt_to_page(page));
  15.352 +	case IOCTL_PRIVCMD_INITDOMAIN_STORE: {
  15.353 +		extern int do_xenbus_probe(void*);
  15.354 +		unsigned long page;
  15.355  
  15.356 -        /* Initial connect. Setup channel and page. */
  15.357 -        xen_start_info->store_evtchn = data;
  15.358 -        xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >>
  15.359 -                                              PAGE_SHIFT);
  15.360 -        ret = xen_start_info->store_mfn;
  15.361 +		if (xen_start_info->store_evtchn != 0) {
  15.362 +			ret = xen_start_info->store_mfn;
  15.363 +			break;
  15.364 +		}
  15.365  
  15.366 -        /* We'll return then this will wait for daemon to answer */
  15.367 -        kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
  15.368 -    }
  15.369 -    break;
  15.370 +		/* Allocate page. */
  15.371 +		page = get_zeroed_page(GFP_KERNEL);
  15.372 +		if (!page) {
  15.373 +			ret = -ENOMEM;
  15.374 +			break;
  15.375 +		}
  15.376  
  15.377 -    default:
  15.378 -        ret = -EINVAL;
  15.379 -        break;
  15.380 -    }
  15.381 -    return ret;
  15.382 +		/* We don't refcount this page properly, so mark it
  15.383 +		 * reserved (the allocation is permanent). */
  15.384 +		SetPageReserved(virt_to_page(page));
  15.385 +
  15.386 +		/* Initial connect. Set up channel and page. */
  15.387 +		xen_start_info->store_evtchn = data;
  15.388 +		xen_start_info->store_mfn =
  15.389 +			pfn_to_mfn(virt_to_phys((void *)page) >>
  15.390 +				   PAGE_SHIFT);
  15.391 +		ret = xen_start_info->store_mfn;
  15.392 +
  15.393 +		/* We'll return; the kthread waits for the daemon's answer. */
  15.394 +		kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
  15.395 +	}
  15.396 +	break;
  15.397 +
  15.398 +	default:
  15.399 +		ret = -EINVAL;
  15.400 +		break;
  15.401 +	}
  15.402 +
  15.403 +	return ret;
  15.404  }
  15.405  
  15.406  static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
  15.407  {
  15.408 -    /* DONTCOPY is essential for Xen as copy_page_range is broken. */
  15.409 -    vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
  15.410 +	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
  15.411 +	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
  15.412  
  15.413 -    return 0;
  15.414 +	return 0;
  15.415  }
  15.416  
  15.417  static struct file_operations privcmd_file_ops = {
  15.418 -    .ioctl = privcmd_ioctl,
  15.419 -    .mmap  = privcmd_mmap,
  15.420 +	.ioctl = privcmd_ioctl,
  15.421 +	.mmap  = privcmd_mmap,
  15.422  };
  15.423  
  15.424  
  15.425  static int __init privcmd_init(void)
  15.426  {
  15.427 -    privcmd_intf = create_xen_proc_entry("privcmd", 0400);
  15.428 -    if ( privcmd_intf != NULL )
  15.429 -        privcmd_intf->proc_fops = &privcmd_file_ops;
  15.430 +	privcmd_intf = create_xen_proc_entry("privcmd", 0400);
  15.431 +	if (privcmd_intf != NULL)
  15.432 +		privcmd_intf->proc_fops = &privcmd_file_ops;
  15.433  
  15.434 -    return 0;
  15.435 +	return 0;
  15.436  }
  15.437  
  15.438  __initcall(privcmd_init);
  15.439 +
  15.440 +/*
  15.441 + * Local variables:
  15.442 + *  c-file-style: "linux"
  15.443 + *  indent-tabs-mode: t
  15.444 + *  c-indent-level: 8
  15.445 + *  c-basic-offset: 8
  15.446 + *  tab-width: 8
  15.447 + * End:
  15.448 + */
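
The IOCTL_PRIVCMD_HYPERCALL path above copies a privcmd_hypercall_t — an op
plus up to five arguments — in from userspace and issues the trap with the
arch-specific asm. A minimal caller sketch follows; the proc node matches the
"privcmd" entry created in privcmd_init(), while the header path and the zero
example op are placeholders, not taken from this changeset:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <xen/linux/privcmd.h>	/* assumed home of privcmd_hypercall_t */

	static long xen_hypercall(int fd, unsigned long op,
				  unsigned long a0, unsigned long a1)
	{
		privcmd_hypercall_t call = {
			.op  = op,			/* hypercall number */
			.arg = { a0, a1, 0, 0, 0 },	/* five args, as in the handler */
		};

		/* The driver copy_from_user()s the struct and returns the result. */
		return ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
	}

	int main(void)
	{
		long ret;
		int fd = open("/proc/xen/privcmd", O_RDWR);	/* 0400 node, root only */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		ret = xen_hypercall(fd, 0 /* placeholder op */, 0, 0);
		printf("hypercall returned %ld\n", ret);
		close(fd);
		return 0;
	}

Passing the struct's address as the ioctl argument is what the handler's
copy_from_user(&hypercall, (void *)data, ...) expects.
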
    16.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu Sep 22 14:01:01 2005 +0100
    16.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu Sep 22 14:04:14 2005 +0100
    16.3 @@ -84,3 +84,13 @@ extern int num_frontends;
    16.4  #define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE))
    16.5  
    16.6  #endif /* __TPMIF__BACKEND__COMMON_H__ */
    16.7 +
    16.8 +/*
    16.9 + * Local variables:
   16.10 + *  c-file-style: "linux"
   16.11 + *  indent-tabs-mode: t
   16.12 + *  c-indent-level: 8
   16.13 + *  c-basic-offset: 8
   16.14 + *  tab-width: 8
   16.15 + * End:
   16.16 + */
    17.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu Sep 22 14:01:01 2005 +0100
    17.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu Sep 22 14:04:14 2005 +0100
    17.3 @@ -1075,3 +1075,13 @@ tpmback_init(void)
    17.4  }
    17.5  
    17.6  __initcall(tpmback_init);
    17.7 +
    17.8 +/*
    17.9 + * Local variables:
   17.10 + *  c-file-style: "linux"
   17.11 + *  indent-tabs-mode: t
   17.12 + *  c-indent-level: 8
   17.13 + *  c-basic-offset: 8
   17.14 + *  tab-width: 8
   17.15 + * End:
   17.16 + */
    18.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Thu Sep 22 14:01:01 2005 +0100
    18.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Thu Sep 22 14:04:14 2005 +0100
    18.3 @@ -268,3 +268,13 @@ void tpmif_xenbus_init(void)
    18.4  {
    18.5  	xenbus_register_backend(&tpmback);
    18.6  }
    18.7 +
    18.8 +/*
    18.9 + * Local variables:
   18.10 + *  c-file-style: "linux"
   18.11 + *  indent-tabs-mode: t
   18.12 + *  c-indent-level: 8
   18.13 + *  c-basic-offset: 8
   18.14 + *  tab-width: 8
   18.15 + * End:
   18.16 + */
    19.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu Sep 22 14:01:01 2005 +0100
    19.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu Sep 22 14:04:14 2005 +0100
    19.3 @@ -741,3 +741,13 @@ tpmif_init(void)
    19.4  }
    19.5  
    19.6  __initcall(tpmif_init);
    19.7 +
    19.8 +/*
    19.9 + * Local variables:
   19.10 + *  c-file-style: "linux"
   19.11 + *  indent-tabs-mode: t
   19.12 + *  c-indent-level: 8
   19.13 + *  c-basic-offset: 8
   19.14 + *  tab-width: 8
   19.15 + * End:
   19.16 + */
    20.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu Sep 22 14:01:01 2005 +0100
    20.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu Sep 22 14:04:14 2005 +0100
    20.3 @@ -38,3 +38,13 @@ struct tx_buffer
    20.4  };
    20.5  
    20.6  #endif
    20.7 +
    20.8 +/*
    20.9 + * Local variables:
   20.10 + *  c-file-style: "linux"
   20.11 + *  indent-tabs-mode: t
   20.12 + *  c-indent-level: 8
   20.13 + *  c-basic-offset: 8
   20.14 + *  tab-width: 8
   20.15 + * End:
   20.16 + */
    21.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/control.c	Thu Sep 22 14:01:01 2005 +0100
    21.2 +++ b/linux-2.6-xen-sparse/drivers/xen/usbback/control.c	Thu Sep 22 14:04:14 2005 +0100
    21.3 @@ -59,3 +59,13 @@ void usbif_ctrlif_init(void)
    21.4      memcpy(cmsg.msg, &st, sizeof(st));
    21.5      ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
    21.6  }
    21.7 +
    21.8 +/*
    21.9 + * Local variables:
   21.10 + *  c-file-style: "linux"
   21.11 + *  indent-tabs-mode: t
   21.12 + *  c-indent-level: 8
   21.13 + *  c-basic-offset: 8
   21.14 + *  tab-width: 8
   21.15 + * End:
   21.16 + */
    22.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Thu Sep 22 14:01:01 2005 +0100
    22.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Thu Sep 22 14:04:14 2005 +0100
    22.3 @@ -231,3 +231,13 @@ void xb_suspend_comms(void)
    22.4  
    22.5  	unbind_evtchn_from_irqhandler(xen_start_info->store_evtchn, &xb_waitq);
    22.6  }
    22.7 +
    22.8 +/*
    22.9 + * Local variables:
   22.10 + *  c-file-style: "linux"
   22.11 + *  indent-tabs-mode: t
   22.12 + *  c-indent-level: 8
   22.13 + *  c-basic-offset: 8
   22.14 + *  tab-width: 8
   22.15 + * End:
   22.16 + */
    23.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h	Thu Sep 22 14:01:01 2005 +0100
    23.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h	Thu Sep 22 14:04:14 2005 +0100
    23.3 @@ -39,3 +39,13 @@ int xs_input_avail(void);
    23.4  extern wait_queue_head_t xb_waitq;
    23.5  
    23.6  #endif /* _XENBUS_COMMS_H */
    23.7 +
    23.8 +/*
    23.9 + * Local variables:
   23.10 + *  c-file-style: "linux"
   23.11 + *  indent-tabs-mode: t
   23.12 + *  c-indent-level: 8
   23.13 + *  c-basic-offset: 8
   23.14 + *  tab-width: 8
   23.15 + * End:
   23.16 + */
    24.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Thu Sep 22 14:01:01 2005 +0100
    24.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Thu Sep 22 14:04:14 2005 +0100
    24.3 @@ -186,3 +186,13 @@ xenbus_dev_init(void)
    24.4  }
    24.5  
    24.6  __initcall(xenbus_dev_init);
    24.7 +
    24.8 +/*
    24.9 + * Local variables:
   24.10 + *  c-file-style: "linux"
   24.11 + *  indent-tabs-mode: t
   24.12 + *  c-indent-level: 8
   24.13 + *  c-basic-offset: 8
   24.14 + *  tab-width: 8
   24.15 + * End:
   24.16 + */
    25.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Thu Sep 22 14:01:01 2005 +0100
    25.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Thu Sep 22 14:04:14 2005 +0100
    25.3 @@ -687,3 +687,13 @@ static int __init xenbus_probe_init(void
    25.4  }
    25.5  
    25.6  postcore_initcall(xenbus_probe_init);
    25.7 +
    25.8 +/*
    25.9 + * Local variables:
   25.10 + *  c-file-style: "linux"
   25.11 + *  indent-tabs-mode: t
   25.12 + *  c-indent-level: 8
   25.13 + *  c-basic-offset: 8
   25.14 + *  tab-width: 8
   25.15 + * End:
   25.16 + */
    26.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Thu Sep 22 14:01:01 2005 +0100
    26.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Thu Sep 22 14:04:14 2005 +0100
    26.3 @@ -566,3 +566,13 @@ int xs_init(void)
    26.4  		return PTR_ERR(watcher);
    26.5  	return 0;
    26.6  }
    26.7 +
    26.8 +/*
    26.9 + * Local variables:
   26.10 + *  c-file-style: "linux"
   26.11 + *  indent-tabs-mode: t
   26.12 + *  c-indent-level: 8
   26.13 + *  c-basic-offset: 8
   26.14 + *  tab-width: 8
   26.15 + * End:
   26.16 + */
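
The trailer appended to every file in this changeset is an Emacs
file-local-variables block: it pins each file to the kernel's tab-based,
8-column style so later edits stay consistent with this cleanup. Editors that
honour modelines instead can express the same intent; a vim equivalent (not
part of this changeset) would be:

	/* vim: set ts=8 sts=8 sw=8 noexpandtab: */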