ia64/xen-unstable

changeset 7022:10759a44ce3b

Merged. This merge removes the XEN_NETDEV_GRANT option from Kconfig and the defconfigs (grant tables become the unconditional data-exchange mechanism for the split network drivers) and reindents the sparse-tree Xen drivers to Linux kernel coding style: tabs, eight-column indents, and trailing Emacs "Local variables" blocks.
author emellor@ewan
date Thu Sep 22 16:12:14 2005 +0100 (2005-09-22)
parents eba5afe9aa37 28db21fb7545
children 4be4126911dc
files linux-2.6-xen-sparse/arch/xen/Kconfig linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64 linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c linux-2.6-xen-sparse/drivers/xen/blkback/common.h linux-2.6-xen-sparse/drivers/xen/blkback/interface.c linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c linux-2.6-xen-sparse/drivers/xen/blkfront/block.h linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c linux-2.6-xen-sparse/drivers/xen/blktap/common.h linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c linux-2.6-xen-sparse/drivers/xen/console/console.c linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c linux-2.6-xen-sparse/drivers/xen/netback/common.h linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c linux-2.6-xen-sparse/drivers/xen/tpmback/common.h linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c tools/python/xen/xend/XendDomain.py tools/python/xen/xend/XendDomainInfo.py tools/python/xen/xend/image.py xen/include/public/io/netif.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/Kconfig	Thu Sep 22 16:05:44 2005 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/Kconfig	Thu Sep 22 16:12:14 2005 +0100
     1.3 @@ -111,13 +111,6 @@ config XEN_NETDEV_FRONTEND
     1.4  	  dedicated device-driver domain, or your master control domain
     1.5  	  (domain 0), then you almost certainly want to say Y here.
     1.6  
     1.7 -config XEN_NETDEV_GRANT
     1.8 -        bool "Grant table substrate for network drivers (DANGEROUS)"
     1.9 -        default n
    1.10 -        help
    1.11 -          This introduces the use of grant tables as a data exhange mechanism
    1.12 -          between the frontend and backend network drivers.
    1.13 -
    1.14  config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
    1.15  	bool "Pipelined transmitter (DANGEROUS)"
    1.16  	depends on XEN_NETDEV_FRONTEND
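
The help text removed above described grant tables as the data-exchange
mechanism between the frontend and backend network drivers; with the
option gone, the drivers use grant mapping unconditionally. For
orientation, a minimal sketch of the backend-side mapping call,
assembled from the interface.c hunks later in this changeset (the
function name and parameters here are hypothetical placeholders, not
part of the patch):

	static int map_frontend_grant(unsigned long vaddr, grant_ref_t gref,
				      domid_t domid)
	{
		struct gnttab_map_grant_ref map;

		map.host_addr = vaddr;           /* where to map the frame  */
		map.dom       = domid;           /* granting (frontend) dom */
		map.ref       = gref;            /* grant ref from frontend */
		map.flags     = GNTMAP_host_map;

		/* A nonzero return means the hypercall failed outright. */
		BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						 &map, 1));

		/* A negative handle signals a per-entry mapping failure. */
		return (map.handle < 0) ? map.handle : 0;
	}
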
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32	Thu Sep 22 16:05:44 2005 +0100
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32	Thu Sep 22 16:12:14 2005 +0100
     2.3 @@ -19,7 +19,6 @@ CONFIG_XEN_NETDEV_BACKEND=y
     2.4  # CONFIG_XEN_TPMDEV_BACKEND is not set
     2.5  CONFIG_XEN_BLKDEV_FRONTEND=y
     2.6  CONFIG_XEN_NETDEV_FRONTEND=y
     2.7 -CONFIG_XEN_NETDEV_GRANT=y
     2.8  # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
     2.9  # CONFIG_XEN_BLKDEV_TAP is not set
    2.10  # CONFIG_XEN_SHADOW_MODE is not set
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64	Thu Sep 22 16:05:44 2005 +0100
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64	Thu Sep 22 16:12:14 2005 +0100
     3.3 @@ -19,7 +19,6 @@ CONFIG_XEN_NETDEV_BACKEND=y
     3.4  # CONFIG_XEN_TPMDEV_BACKEND is not set
     3.5  CONFIG_XEN_BLKDEV_FRONTEND=y
     3.6  CONFIG_XEN_NETDEV_FRONTEND=y
     3.7 -CONFIG_XEN_NETDEV_GRANT=y
     3.8  # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
     3.9  # CONFIG_XEN_BLKDEV_TAP is not set
    3.10  # CONFIG_XEN_SHADOW_MODE is not set
     4.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32	Thu Sep 22 16:05:44 2005 +0100
     4.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32	Thu Sep 22 16:12:14 2005 +0100
     4.3 @@ -16,7 +16,6 @@ CONFIG_NO_IDLE_HZ=y
     4.4  # CONFIG_XEN_TPMDEV_BACKEND is not set
     4.5  CONFIG_XEN_BLKDEV_FRONTEND=y
     4.6  CONFIG_XEN_NETDEV_FRONTEND=y
     4.7 -CONFIG_XEN_NETDEV_GRANT=y
     4.8  # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
     4.9  # CONFIG_XEN_BLKDEV_TAP is not set
    4.10  # CONFIG_XEN_SHADOW_MODE is not set
     5.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64	Thu Sep 22 16:05:44 2005 +0100
     5.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64	Thu Sep 22 16:12:14 2005 +0100
     5.3 @@ -16,7 +16,6 @@ CONFIG_NO_IDLE_HZ=y
     5.4  # CONFIG_XEN_TPMDEV_BACKEND is not set
     5.5  CONFIG_XEN_BLKDEV_FRONTEND=y
     5.6  CONFIG_XEN_NETDEV_FRONTEND=y
     5.7 -CONFIG_XEN_NETDEV_GRANT=y
     5.8  # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
     5.9  # CONFIG_XEN_BLKDEV_TAP is not set
    5.10  # CONFIG_XEN_SHADOW_MODE is not set
     6.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Thu Sep 22 16:05:44 2005 +0100
     6.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Thu Sep 22 16:12:14 2005 +0100
     6.3 @@ -19,7 +19,6 @@ CONFIG_XEN_NETDEV_BACKEND=y
     6.4  # CONFIG_XEN_TPMDEV_BACKEND is not set
     6.5  CONFIG_XEN_BLKDEV_FRONTEND=y
     6.6  CONFIG_XEN_NETDEV_FRONTEND=y
     6.7 -CONFIG_XEN_NETDEV_GRANT=y
     6.8  # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
     6.9  # CONFIG_XEN_BLKDEV_TAP is not set
    6.10  # CONFIG_XEN_SHADOW_MODE is not set
     7.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64	Thu Sep 22 16:05:44 2005 +0100
     7.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64	Thu Sep 22 16:12:14 2005 +0100
     7.3 @@ -19,7 +19,6 @@ CONFIG_XEN_NETDEV_BACKEND=y
     7.4  # CONFIG_XEN_TPMDEV_BACKEND is not set
     7.5  CONFIG_XEN_BLKDEV_FRONTEND=y
     7.6  CONFIG_XEN_NETDEV_FRONTEND=y
     7.7 -CONFIG_XEN_NETDEV_GRANT=y
     7.8  # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
     7.9  # CONFIG_XEN_BLKDEV_TAP is not set
    7.10  # CONFIG_XEN_SHADOW_MODE is not set
     8.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Sep 22 16:05:44 2005 +0100
     8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Sep 22 16:12:14 2005 +0100
     8.3 @@ -28,12 +28,12 @@
     8.4  #define BATCH_PER_DOMAIN 16
     8.5  
     8.6  static unsigned long mmap_vstart;
     8.7 -#define MMAP_PAGES                                              \
     8.8 -    (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
     8.9 -#define MMAP_VADDR(_req,_seg)                                   \
    8.10 -    (mmap_vstart +                                              \
    8.11 -     ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +    \
    8.12 -     ((_seg) * PAGE_SIZE))
    8.13 +#define MMAP_PAGES						\
    8.14 +	(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
    8.15 +#define MMAP_VADDR(_req,_seg)						\
    8.16 +	(mmap_vstart +							\
    8.17 +	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
    8.18 +	 ((_seg) * PAGE_SIZE))
    8.19  
    8.20  /*
    8.21   * Each outstanding request that we've passed to the lower device layers has a 
    8.22 @@ -42,12 +42,12 @@ static unsigned long mmap_vstart;
    8.23   * response queued for it, with the saved 'id' passed back.
    8.24   */
    8.25  typedef struct {
    8.26 -    blkif_t       *blkif;
    8.27 -    unsigned long  id;
    8.28 -    int            nr_pages;
    8.29 -    atomic_t       pendcnt;
    8.30 -    unsigned short operation;
    8.31 -    int            status;
    8.32 +	blkif_t       *blkif;
    8.33 +	unsigned long  id;
    8.34 +	int            nr_pages;
    8.35 +	atomic_t       pendcnt;
    8.36 +	unsigned short operation;
    8.37 +	int            status;
    8.38  } pending_req_t;
    8.39  
    8.40  /*
    8.41 @@ -68,14 +68,13 @@ static PEND_RING_IDX pending_prod, pendi
    8.42  static request_queue_t *plugged_queue;
    8.43  static inline void flush_plugged_queue(void)
    8.44  {
    8.45 -    request_queue_t *q = plugged_queue;
    8.46 -    if ( q != NULL )
    8.47 -    {
    8.48 -        if ( q->unplug_fn != NULL )
    8.49 -            q->unplug_fn(q);
    8.50 -        blk_put_queue(q);
    8.51 -        plugged_queue = NULL;
    8.52 -    }
    8.53 +	request_queue_t *q = plugged_queue;
    8.54 +	if (q != NULL) {
    8.55 +		if ( q->unplug_fn != NULL )
    8.56 +			q->unplug_fn(q);
    8.57 +		blk_put_queue(q);
    8.58 +		plugged_queue = NULL;
    8.59 +	}
    8.60  }
    8.61  
    8.62  /* When using grant tables to map a frame for device access then the
    8.63 @@ -106,24 +105,23 @@ static void make_response(blkif_t *blkif
    8.64  
    8.65  static void fast_flush_area(int idx, int nr_pages)
    8.66  {
    8.67 -    struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    8.68 -    unsigned int i, invcount = 0;
    8.69 -    u16 handle;
    8.70 +	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    8.71 +	unsigned int i, invcount = 0;
    8.72 +	u16 handle;
    8.73  
    8.74 -    for ( i = 0; i < nr_pages; i++ )
    8.75 -    {
    8.76 -        if ( BLKBACK_INVALID_HANDLE != ( handle = pending_handle(idx, i) ) )
    8.77 -        {
    8.78 -            unmap[i].host_addr      = MMAP_VADDR(idx, i);
    8.79 -            unmap[i].dev_bus_addr   = 0;
    8.80 -            unmap[i].handle         = handle;
    8.81 -            pending_handle(idx, i)  = BLKBACK_INVALID_HANDLE;
    8.82 -            invcount++;
    8.83 -        }
    8.84 -    }
    8.85 -    if ( unlikely(HYPERVISOR_grant_table_op(
    8.86 -                    GNTTABOP_unmap_grant_ref, unmap, invcount)))
    8.87 -        BUG();
    8.88 +	for (i = 0; i < nr_pages; i++) {
    8.89 +		handle = pending_handle(idx, i);
    8.90 +		if (handle == BLKBACK_INVALID_HANDLE)
    8.91 +			continue;
    8.92 +		unmap[i].host_addr      = MMAP_VADDR(idx, i);
    8.93 +		unmap[i].dev_bus_addr   = 0;
    8.94 +		unmap[i].handle         = handle;
    8.95 +		pending_handle(idx, i)  = BLKBACK_INVALID_HANDLE;
    8.96 +		invcount++;
    8.97 +	}
    8.98 +
    8.99 +	BUG_ON(HYPERVISOR_grant_table_op(
   8.100 +		GNTTABOP_unmap_grant_ref, unmap, invcount));
   8.101  }
   8.102  
   8.103  
   8.104 @@ -136,34 +134,38 @@ static spinlock_t blkio_schedule_list_lo
   8.105  
   8.106  static int __on_blkdev_list(blkif_t *blkif)
   8.107  {
   8.108 -    return blkif->blkdev_list.next != NULL;
   8.109 +	return blkif->blkdev_list.next != NULL;
   8.110  }
   8.111  
   8.112  static void remove_from_blkdev_list(blkif_t *blkif)
   8.113  {
   8.114 -    unsigned long flags;
   8.115 -    if ( !__on_blkdev_list(blkif) ) return;
   8.116 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.117 -    if ( __on_blkdev_list(blkif) )
   8.118 -    {
   8.119 -        list_del(&blkif->blkdev_list);
   8.120 -        blkif->blkdev_list.next = NULL;
   8.121 -        blkif_put(blkif);
   8.122 -    }
   8.123 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.124 +	unsigned long flags;
   8.125 +
   8.126 +	if (!__on_blkdev_list(blkif))
   8.127 +		return;
   8.128 +
   8.129 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.130 +	if (__on_blkdev_list(blkif)) {
   8.131 +		list_del(&blkif->blkdev_list);
   8.132 +		blkif->blkdev_list.next = NULL;
   8.133 +		blkif_put(blkif);
   8.134 +	}
   8.135 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.136  }
   8.137  
   8.138  static void add_to_blkdev_list_tail(blkif_t *blkif)
   8.139  {
   8.140 -    unsigned long flags;
   8.141 -    if ( __on_blkdev_list(blkif) ) return;
   8.142 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.143 -    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
   8.144 -    {
   8.145 -        list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
   8.146 -        blkif_get(blkif);
   8.147 -    }
   8.148 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.149 +	unsigned long flags;
   8.150 +
   8.151 +	if (__on_blkdev_list(blkif))
   8.152 +		return;
   8.153 +
   8.154 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
   8.155 +	if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
   8.156 +		list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
   8.157 +		blkif_get(blkif);
   8.158 +	}
   8.159 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
   8.160  }
   8.161  
   8.162  
   8.163 @@ -175,54 +177,53 @@ static DECLARE_WAIT_QUEUE_HEAD(blkio_sch
   8.164  
   8.165  static int blkio_schedule(void *arg)
   8.166  {
   8.167 -    DECLARE_WAITQUEUE(wq, current);
   8.168 -
   8.169 -    blkif_t          *blkif;
   8.170 -    struct list_head *ent;
   8.171 -
   8.172 -    daemonize("xenblkd");
   8.173 +	DECLARE_WAITQUEUE(wq, current);
   8.174  
   8.175 -    for ( ; ; )
   8.176 -    {
   8.177 -        /* Wait for work to do. */
   8.178 -        add_wait_queue(&blkio_schedule_wait, &wq);
   8.179 -        set_current_state(TASK_INTERRUPTIBLE);
   8.180 -        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
   8.181 -             list_empty(&blkio_schedule_list) )
   8.182 -            schedule();
   8.183 -        __set_current_state(TASK_RUNNING);
   8.184 -        remove_wait_queue(&blkio_schedule_wait, &wq);
   8.185 +	blkif_t          *blkif;
   8.186 +	struct list_head *ent;
   8.187  
   8.188 -        /* Queue up a batch of requests. */
   8.189 -        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
   8.190 -                !list_empty(&blkio_schedule_list) )
   8.191 -        {
   8.192 -            ent = blkio_schedule_list.next;
   8.193 -            blkif = list_entry(ent, blkif_t, blkdev_list);
   8.194 -            blkif_get(blkif);
   8.195 -            remove_from_blkdev_list(blkif);
   8.196 -            if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
   8.197 -                add_to_blkdev_list_tail(blkif);
   8.198 -            blkif_put(blkif);
   8.199 -        }
   8.200 +	daemonize("xenblkd");
   8.201  
   8.202 -        /* Push the batch through to disc. */
   8.203 -        flush_plugged_queue();
   8.204 -    }
   8.205 +	for (;;) {
   8.206 +		/* Wait for work to do. */
   8.207 +		add_wait_queue(&blkio_schedule_wait, &wq);
   8.208 +		set_current_state(TASK_INTERRUPTIBLE);
   8.209 +		if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
   8.210 +		     list_empty(&blkio_schedule_list) )
   8.211 +			schedule();
   8.212 +		__set_current_state(TASK_RUNNING);
   8.213 +		remove_wait_queue(&blkio_schedule_wait, &wq);
   8.214 +
   8.215 +		/* Queue up a batch of requests. */
   8.216 +		while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
   8.217 +		       !list_empty(&blkio_schedule_list)) {
   8.218 +			ent = blkio_schedule_list.next;
   8.219 +			blkif = list_entry(ent, blkif_t, blkdev_list);
   8.220 +			blkif_get(blkif);
   8.221 +			remove_from_blkdev_list(blkif);
   8.222 +			if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
   8.223 +				add_to_blkdev_list_tail(blkif);
   8.224 +			blkif_put(blkif);
   8.225 +		}
   8.226 +
   8.227 +		/* Push the batch through to disc. */
   8.228 +		flush_plugged_queue();
   8.229 +	}
   8.230  }
   8.231  
   8.232  static void maybe_trigger_blkio_schedule(void)
   8.233  {
   8.234 -    /*
   8.235 -     * Needed so that two processes, who together make the following predicate
   8.236 -     * true, don't both read stale values and evaluate the predicate
   8.237 -     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
   8.238 -     */
   8.239 -    smp_mb();
   8.240 +	/*
   8.241 +	 * Needed so that two processes, which together make the following
   8.242 +	 * predicate true, don't both read stale values and evaluate the
   8.243 +	 * predicate incorrectly. Incredibly unlikely to stall the scheduler
   8.244 +	 * on x86, but...
   8.245 +	 */
   8.246 +	smp_mb();
   8.247  
   8.248 -    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   8.249 -         !list_empty(&blkio_schedule_list) )
   8.250 -        wake_up(&blkio_schedule_wait);
   8.251 +	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   8.252 +	    !list_empty(&blkio_schedule_list))
   8.253 +		wake_up(&blkio_schedule_wait);
   8.254  }
   8.255  
   8.256  
   8.257 @@ -233,36 +234,34 @@ static void maybe_trigger_blkio_schedule
   8.258  
   8.259  static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
   8.260  {
   8.261 -    unsigned long flags;
   8.262 -
   8.263 -    /* An error fails the entire request. */
   8.264 -    if ( !uptodate )
   8.265 -    {
   8.266 -        DPRINTK("Buffer not up-to-date at end of operation\n");
   8.267 -        pending_req->status = BLKIF_RSP_ERROR;
   8.268 -    }
   8.269 +	unsigned long flags;
   8.270  
   8.271 -    if ( atomic_dec_and_test(&pending_req->pendcnt) )
   8.272 -    {
   8.273 -        int pending_idx = pending_req - pending_reqs;
   8.274 -        fast_flush_area(pending_idx, pending_req->nr_pages);
   8.275 -        make_response(pending_req->blkif, pending_req->id,
   8.276 -                      pending_req->operation, pending_req->status);
   8.277 -        blkif_put(pending_req->blkif);
   8.278 -        spin_lock_irqsave(&pend_prod_lock, flags);
   8.279 -        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   8.280 -        spin_unlock_irqrestore(&pend_prod_lock, flags);
   8.281 -        maybe_trigger_blkio_schedule();
   8.282 -    }
   8.283 +	/* An error fails the entire request. */
   8.284 +	if (!uptodate) {
   8.285 +		DPRINTK("Buffer not up-to-date at end of operation\n");
   8.286 +		pending_req->status = BLKIF_RSP_ERROR;
   8.287 +	}
   8.288 +
   8.289 +	if (atomic_dec_and_test(&pending_req->pendcnt)) {
   8.290 +		int pending_idx = pending_req - pending_reqs;
   8.291 +		fast_flush_area(pending_idx, pending_req->nr_pages);
   8.292 +		make_response(pending_req->blkif, pending_req->id,
   8.293 +			      pending_req->operation, pending_req->status);
   8.294 +		blkif_put(pending_req->blkif);
   8.295 +		spin_lock_irqsave(&pend_prod_lock, flags);
   8.296 +		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   8.297 +		spin_unlock_irqrestore(&pend_prod_lock, flags);
   8.298 +		maybe_trigger_blkio_schedule();
   8.299 +	}
   8.300  }
   8.301  
   8.302  static int end_block_io_op(struct bio *bio, unsigned int done, int error)
   8.303  {
   8.304 -    if ( bio->bi_size != 0 )
   8.305 -        return 1;
   8.306 -    __end_block_io_op(bio->bi_private, !error);
   8.307 -    bio_put(bio);
   8.308 -    return error;
   8.309 +	if (bio->bi_size != 0)
   8.310 +		return 1;
   8.311 +	__end_block_io_op(bio->bi_private, !error);
   8.312 +	bio_put(bio);
   8.313 +	return error;
   8.314  }
   8.315  
   8.316  
   8.317 @@ -272,10 +271,10 @@ static int end_block_io_op(struct bio *b
   8.318  
   8.319  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
   8.320  {
   8.321 -    blkif_t *blkif = dev_id;
   8.322 -    add_to_blkdev_list_tail(blkif);
   8.323 -    maybe_trigger_blkio_schedule();
   8.324 -    return IRQ_HANDLED;
   8.325 +	blkif_t *blkif = dev_id;
   8.326 +	add_to_blkdev_list_tail(blkif);
   8.327 +	maybe_trigger_blkio_schedule();
   8.328 +	return IRQ_HANDLED;
   8.329  }
   8.330  
   8.331  
   8.332 @@ -286,183 +285,174 @@ irqreturn_t blkif_be_int(int irq, void *
   8.333  
   8.334  static int do_block_io_op(blkif_t *blkif, int max_to_do)
   8.335  {
   8.336 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   8.337 -    blkif_request_t *req;
   8.338 -    RING_IDX i, rp;
   8.339 -    int more_to_do = 0;
   8.340 -
   8.341 -    rp = blk_ring->sring->req_prod;
   8.342 -    rmb(); /* Ensure we see queued requests up to 'rp'. */
   8.343 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   8.344 +	blkif_request_t *req;
   8.345 +	RING_IDX i, rp;
   8.346 +	int more_to_do = 0;
   8.347  
   8.348 -    for ( i = blk_ring->req_cons; 
   8.349 -         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
   8.350 -          i++ )
   8.351 -    {
   8.352 -        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
   8.353 -        {
   8.354 -            more_to_do = 1;
   8.355 -            break;
   8.356 -        }
   8.357 +	rp = blk_ring->sring->req_prod;
   8.358 +	rmb(); /* Ensure we see queued requests up to 'rp'. */
   8.359 +
   8.360 +	for (i = blk_ring->req_cons; 
   8.361 +	     (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
   8.362 +	     i++) {
   8.363 +		if ((max_to_do-- == 0) ||
   8.364 +		    (NR_PENDING_REQS == MAX_PENDING_REQS)) {
   8.365 +			more_to_do = 1;
   8.366 +			break;
   8.367 +		}
   8.368          
   8.369 -        req = RING_GET_REQUEST(blk_ring, i);
   8.370 -        switch ( req->operation )
   8.371 -        {
   8.372 -        case BLKIF_OP_READ:
   8.373 -        case BLKIF_OP_WRITE:
   8.374 -            dispatch_rw_block_io(blkif, req);
   8.375 -            break;
   8.376 +		req = RING_GET_REQUEST(blk_ring, i);
   8.377 +		switch (req->operation) {
   8.378 +		case BLKIF_OP_READ:
   8.379 +		case BLKIF_OP_WRITE:
   8.380 +			dispatch_rw_block_io(blkif, req);
   8.381 +			break;
   8.382  
   8.383 -        default:
   8.384 -            DPRINTK("error: unknown block io operation [%d]\n",
   8.385 -                    req->operation);
   8.386 -            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   8.387 -            break;
   8.388 -        }
   8.389 -    }
   8.390 +		default:
   8.391 +			DPRINTK("error: unknown block io operation [%d]\n",
   8.392 +				req->operation);
   8.393 +			make_response(blkif, req->id, req->operation,
   8.394 +				      BLKIF_RSP_ERROR);
   8.395 +			break;
   8.396 +		}
   8.397 +	}
   8.398  
   8.399 -    blk_ring->req_cons = i;
   8.400 -    return more_to_do;
   8.401 +	blk_ring->req_cons = i;
   8.402 +	return more_to_do;
   8.403  }
   8.404  
   8.405  static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
   8.406  {
   8.407 -    extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
   8.408 -    int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
   8.409 -    unsigned long fas = 0;
   8.410 -    int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   8.411 -    pending_req_t *pending_req;
   8.412 -    struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   8.413 -    struct phys_req preq;
   8.414 -    struct { 
   8.415 -        unsigned long buf; unsigned int nsec;
   8.416 -    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   8.417 -    unsigned int nseg;
   8.418 -    struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   8.419 -    int nbio = 0;
   8.420 -    request_queue_t *q;
   8.421 -
   8.422 -    /* Check that number of segments is sane. */
   8.423 -    nseg = req->nr_segments;
   8.424 -    if ( unlikely(nseg == 0) || 
   8.425 -         unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
   8.426 -    {
   8.427 -        DPRINTK("Bad number of segments in request (%d)\n", nseg);
   8.428 -        goto bad_descriptor;
   8.429 -    }
   8.430 -
   8.431 -    preq.dev           = req->handle;
   8.432 -    preq.sector_number = req->sector_number;
   8.433 -    preq.nr_sects      = 0;
   8.434 -
   8.435 -    for ( i = 0; i < nseg; i++ )
   8.436 -    {
   8.437 -        fas         = req->frame_and_sects[i];
   8.438 -        seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
   8.439 -
   8.440 -        if ( seg[i].nsec <= 0 )
   8.441 -            goto bad_descriptor;
   8.442 -        preq.nr_sects += seg[i].nsec;
   8.443 -
   8.444 -        map[i].host_addr = MMAP_VADDR(pending_idx, i);
   8.445 -        map[i].dom = blkif->domid;
   8.446 -        map[i].ref = blkif_gref_from_fas(fas);
   8.447 -        map[i].flags = GNTMAP_host_map;
   8.448 -        if ( operation == WRITE )
   8.449 -            map[i].flags |= GNTMAP_readonly;
   8.450 -    }
   8.451 -
   8.452 -    if ( unlikely(HYPERVISOR_grant_table_op(
   8.453 -                    GNTTABOP_map_grant_ref, map, nseg)))
   8.454 -        BUG();
   8.455 -
   8.456 -    for ( i = 0; i < nseg; i++ )
   8.457 -    {
   8.458 -        if ( unlikely(map[i].handle < 0) )
   8.459 -        {
   8.460 -            DPRINTK("invalid buffer -- could not remap it\n");
   8.461 -            fast_flush_area(pending_idx, nseg);
   8.462 -            goto bad_descriptor;
   8.463 -        }
   8.464 -
   8.465 -        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
   8.466 -            FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
   8.467 -
   8.468 -        pending_handle(pending_idx, i) = map[i].handle;
   8.469 -    }
   8.470 +	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
   8.471 +	int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
   8.472 +	unsigned long fas = 0;
   8.473 +	int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   8.474 +	pending_req_t *pending_req;
   8.475 +	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   8.476 +	struct phys_req preq;
   8.477 +	struct { 
   8.478 +		unsigned long buf; unsigned int nsec;
   8.479 +	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   8.480 +	unsigned int nseg;
   8.481 +	struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   8.482 +	int nbio = 0;
   8.483 +	request_queue_t *q;
   8.484  
   8.485 -    for ( i = 0; i < nseg; i++ )
   8.486 -    {
   8.487 -        fas         = req->frame_and_sects[i];
   8.488 -        seg[i].buf  = map[i].dev_bus_addr | (blkif_first_sect(fas) << 9);
   8.489 -    }
   8.490 -
   8.491 -    if ( vbd_translate(&preq, blkif, operation) != 0 )
   8.492 -    {
   8.493 -        DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
   8.494 -                operation == READ ? "read" : "write", preq.sector_number,
   8.495 -                preq.sector_number + preq.nr_sects, preq.dev); 
   8.496 -        goto bad_descriptor;
   8.497 -    }
   8.498 -
   8.499 -    pending_req = &pending_reqs[pending_idx];
   8.500 -    pending_req->blkif     = blkif;
   8.501 -    pending_req->id        = req->id;
   8.502 -    pending_req->operation = operation;
   8.503 -    pending_req->status    = BLKIF_RSP_OKAY;
   8.504 -    pending_req->nr_pages  = nseg;
   8.505 -
   8.506 -    for ( i = 0; i < nseg; i++ )
   8.507 -    {
   8.508 -        if ( ((int)preq.sector_number|(int)seg[i].nsec) &
   8.509 -             ((bdev_hardsect_size(preq.bdev) >> 9) - 1) )
   8.510 -        {
   8.511 -            DPRINTK("Misaligned I/O request from domain %d", blkif->domid);
   8.512 -            goto cleanup_and_fail;
   8.513 -        }
   8.514 +	/* Check that number of segments is sane. */
   8.515 +	nseg = req->nr_segments;
   8.516 +	if (unlikely(nseg == 0) || 
   8.517 +	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
   8.518 +		DPRINTK("Bad number of segments in request (%d)\n", nseg);
   8.519 +		goto bad_descriptor;
   8.520 +	}
   8.521  
   8.522 -        while ( (bio == NULL) ||
   8.523 -                (bio_add_page(bio,
   8.524 -                              virt_to_page(MMAP_VADDR(pending_idx, i)),
   8.525 -                              seg[i].nsec << 9,
   8.526 -                              seg[i].buf & ~PAGE_MASK) == 0) )
   8.527 -        {
   8.528 -            bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
   8.529 -            if ( unlikely(bio == NULL) )
   8.530 -            {
   8.531 -            cleanup_and_fail:
   8.532 -                for ( i = 0; i < (nbio-1); i++ )
   8.533 -                    bio_put(biolist[i]);
   8.534 -                fast_flush_area(pending_idx, nseg);
   8.535 -                goto bad_descriptor;
   8.536 -            }
   8.537 +	preq.dev           = req->handle;
   8.538 +	preq.sector_number = req->sector_number;
   8.539 +	preq.nr_sects      = 0;
   8.540 +
   8.541 +	for (i = 0; i < nseg; i++) {
   8.542 +		fas         = req->frame_and_sects[i];
   8.543 +		seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
   8.544 +
   8.545 +		if (seg[i].nsec <= 0)
   8.546 +			goto bad_descriptor;
   8.547 +		preq.nr_sects += seg[i].nsec;
   8.548 +
   8.549 +		map[i].host_addr = MMAP_VADDR(pending_idx, i);
   8.550 +		map[i].dom = blkif->domid;
   8.551 +		map[i].ref = blkif_gref_from_fas(fas);
   8.552 +		map[i].flags = GNTMAP_host_map;
   8.553 +		if ( operation == WRITE )
   8.554 +			map[i].flags |= GNTMAP_readonly;
   8.555 +	}
   8.556 +
   8.557 +	BUG_ON(HYPERVISOR_grant_table_op(
   8.558 +		GNTTABOP_map_grant_ref, map, nseg));
   8.559 +
   8.560 +	for (i = 0; i < nseg; i++) {
   8.561 +		if (unlikely(map[i].handle < 0)) {
   8.562 +			DPRINTK("invalid buffer -- could not remap it\n");
   8.563 +			fast_flush_area(pending_idx, nseg);
   8.564 +			goto bad_descriptor;
   8.565 +		}
   8.566 +
   8.567 +		phys_to_machine_mapping[__pa(MMAP_VADDR(
   8.568 +			pending_idx, i)) >> PAGE_SHIFT] =
   8.569 +			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
   8.570 +
   8.571 +		pending_handle(pending_idx, i) = map[i].handle;
   8.572 +	}
   8.573 +
   8.574 +	for (i = 0; i < nseg; i++) {
   8.575 +		fas         = req->frame_and_sects[i];
   8.576 +		seg[i].buf  = map[i].dev_bus_addr | 
   8.577 +			(blkif_first_sect(fas) << 9);
   8.578 +	}
   8.579 +
   8.580 +	if (vbd_translate(&preq, blkif, operation) != 0) {
   8.581 +		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
   8.582 +			operation == READ ? "read" : "write",
   8.583 +			preq.sector_number,
   8.584 +			preq.sector_number + preq.nr_sects, preq.dev); 
   8.585 +		goto bad_descriptor;
   8.586 +	}
   8.587 +
   8.588 +	pending_req = &pending_reqs[pending_idx];
   8.589 +	pending_req->blkif     = blkif;
   8.590 +	pending_req->id        = req->id;
   8.591 +	pending_req->operation = operation;
   8.592 +	pending_req->status    = BLKIF_RSP_OKAY;
   8.593 +	pending_req->nr_pages  = nseg;
   8.594 +
   8.595 +	for (i = 0; i < nseg; i++) {
   8.596 +		if (((int)preq.sector_number|(int)seg[i].nsec) &
   8.597 +		    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
   8.598 +			DPRINTK("Misaligned I/O request from domain %d",
   8.599 +				blkif->domid);
   8.600 +			goto cleanup_and_fail;
   8.601 +		}
   8.602 +
   8.603 +		while ((bio == NULL) ||
   8.604 +		       (bio_add_page(bio,
   8.605 +				     virt_to_page(MMAP_VADDR(pending_idx, i)),
   8.606 +				     seg[i].nsec << 9,
   8.607 +				     seg[i].buf & ~PAGE_MASK) == 0)) {
   8.608 +			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
   8.609 +			if (unlikely(bio == NULL)) {
   8.610 +			cleanup_and_fail:
   8.611 +				for (i = 0; i < (nbio-1); i++)
   8.612 +					bio_put(biolist[i]);
   8.613 +				fast_flush_area(pending_idx, nseg);
   8.614 +				goto bad_descriptor;
   8.615 +			}
   8.616                  
   8.617 -            bio->bi_bdev    = preq.bdev;
   8.618 -            bio->bi_private = pending_req;
   8.619 -            bio->bi_end_io  = end_block_io_op;
   8.620 -            bio->bi_sector  = preq.sector_number;
   8.621 -        }
   8.622 -
   8.623 -        preq.sector_number += seg[i].nsec;
   8.624 -    }
   8.625 +			bio->bi_bdev    = preq.bdev;
   8.626 +			bio->bi_private = pending_req;
   8.627 +			bio->bi_end_io  = end_block_io_op;
   8.628 +			bio->bi_sector  = preq.sector_number;
   8.629 +		}
   8.630  
   8.631 -    if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
   8.632 -    {
   8.633 -        flush_plugged_queue();
   8.634 -        blk_get_queue(q);
   8.635 -        plugged_queue = q;
   8.636 -    }
   8.637 +		preq.sector_number += seg[i].nsec;
   8.638 +	}
   8.639  
   8.640 -    atomic_set(&pending_req->pendcnt, nbio);
   8.641 -    pending_cons++;
   8.642 -    blkif_get(blkif);
   8.643 +	if ((q = bdev_get_queue(bio->bi_bdev)) != plugged_queue) {
   8.644 +		flush_plugged_queue();
   8.645 +		blk_get_queue(q);
   8.646 +		plugged_queue = q;
   8.647 +	}
   8.648  
   8.649 -    for ( i = 0; i < nbio; i++ )
   8.650 -        submit_bio(operation, biolist[i]);
   8.651 +	atomic_set(&pending_req->pendcnt, nbio);
   8.652 +	pending_cons++;
   8.653 +	blkif_get(blkif);
   8.654  
   8.655 -    return;
   8.656 +	for (i = 0; i < nbio; i++)
   8.657 +		submit_bio(operation, biolist[i]);
   8.658 +
   8.659 +	return;
   8.660  
   8.661   bad_descriptor:
   8.662 -    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   8.663 +	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
   8.664  } 
   8.665  
   8.666  
   8.667 @@ -475,66 +465,71 @@ static void dispatch_rw_block_io(blkif_t
   8.668  static void make_response(blkif_t *blkif, unsigned long id, 
   8.669                            unsigned short op, int st)
   8.670  {
   8.671 -    blkif_response_t *resp;
   8.672 -    unsigned long     flags;
   8.673 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   8.674 +	blkif_response_t *resp;
   8.675 +	unsigned long     flags;
   8.676 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   8.677  
   8.678 -    /* Place on the response ring for the relevant domain. */ 
   8.679 -    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
   8.680 -    resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
   8.681 -    resp->id        = id;
   8.682 -    resp->operation = op;
   8.683 -    resp->status    = st;
   8.684 -    wmb(); /* Ensure other side can see the response fields. */
   8.685 -    blk_ring->rsp_prod_pvt++;
   8.686 -    RING_PUSH_RESPONSES(blk_ring);
   8.687 -    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
   8.688 +	/* Place on the response ring for the relevant domain. */ 
   8.689 +	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
   8.690 +	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
   8.691 +	resp->id        = id;
   8.692 +	resp->operation = op;
   8.693 +	resp->status    = st;
   8.694 +	wmb(); /* Ensure other side can see the response fields. */
   8.695 +	blk_ring->rsp_prod_pvt++;
   8.696 +	RING_PUSH_RESPONSES(blk_ring);
   8.697 +	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
   8.698  
   8.699 -    /* Kick the relevant domain. */
   8.700 -    notify_via_evtchn(blkif->evtchn);
   8.701 +	/* Kick the relevant domain. */
   8.702 +	notify_via_evtchn(blkif->evtchn);
   8.703  }
   8.704  
   8.705  void blkif_deschedule(blkif_t *blkif)
   8.706  {
   8.707 -    remove_from_blkdev_list(blkif);
   8.708 +	remove_from_blkdev_list(blkif);
   8.709  }
   8.710  
   8.711  static int __init blkif_init(void)
   8.712  {
   8.713 -    int i;
   8.714 -    struct page *page;
   8.715 -
   8.716 -    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
   8.717 -         !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
   8.718 -        return 0;
   8.719 -
   8.720 -    blkif_interface_init();
   8.721 -
   8.722 -    page = balloon_alloc_empty_page_range(MMAP_PAGES);
   8.723 -    BUG_ON(page == NULL);
   8.724 -    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
   8.725 +	int i;
   8.726 +	struct page *page;
   8.727  
   8.728 -    pending_cons = 0;
   8.729 -    pending_prod = MAX_PENDING_REQS;
   8.730 -    memset(pending_reqs, 0, sizeof(pending_reqs));
   8.731 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
   8.732 -        pending_ring[i] = i;
   8.733 -    
   8.734 -    spin_lock_init(&blkio_schedule_list_lock);
   8.735 -    INIT_LIST_HEAD(&blkio_schedule_list);
   8.736 +	if (!(xen_start_info->flags & SIF_INITDOMAIN) &&
   8.737 +	    !(xen_start_info->flags & SIF_BLK_BE_DOMAIN))
   8.738 +		return 0;
   8.739  
   8.740 -    if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
   8.741 -        BUG();
   8.742 -
   8.743 -    blkif_xenbus_init();
   8.744 +	blkif_interface_init();
   8.745  
   8.746 -    memset( pending_grant_handles,  BLKBACK_INVALID_HANDLE, MMAP_PAGES );
   8.747 +	page = balloon_alloc_empty_page_range(MMAP_PAGES);
   8.748 +	BUG_ON(page == NULL);
   8.749 +	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
   8.750  
   8.751 -#ifdef CONFIG_XEN_BLKDEV_TAP_BE
   8.752 -    printk(KERN_ALERT "NOTE: Blkif backend is running with tap support on!\n");
   8.753 -#endif
   8.754 +	pending_cons = 0;
   8.755 +	pending_prod = MAX_PENDING_REQS;
   8.756 +	memset(pending_reqs, 0, sizeof(pending_reqs));
   8.757 +	for (i = 0; i < MAX_PENDING_REQS; i++)
   8.758 +		pending_ring[i] = i;
   8.759 +    
   8.760 +	spin_lock_init(&blkio_schedule_list_lock);
   8.761 +	INIT_LIST_HEAD(&blkio_schedule_list);
   8.762  
   8.763 -    return 0;
   8.764 +	BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
   8.765 +
   8.766 +	blkif_xenbus_init();
   8.767 +
   8.768 +	memset(pending_grant_handles,  BLKBACK_INVALID_HANDLE, MMAP_PAGES);
   8.769 +
   8.770 +	return 0;
   8.771  }
   8.772  
   8.773  __initcall(blkif_init);
   8.774 +
   8.775 +/*
   8.776 + * Local variables:
   8.777 + *  c-file-style: "linux"
   8.778 + *  indent-tabs-mode: t
   8.779 + *  c-indent-level: 8
   8.780 + *  c-basic-offset: 8
   8.781 + *  tab-width: 8
   8.782 + * End:
   8.783 + */
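
Aside from the four-space-to-tab conversion, the blkback.c hunks above
fold several open-coded "if ( unlikely(...) ) BUG();" checks into
BUG_ON(). The two forms are equivalent here: BUG_ON() evaluates its
argument, so the hypercall is still issued; only the unlikely() branch
hint is dropped. Condensed from the fast_flush_area() hunk above:

	/* Before: */
	if ( unlikely(HYPERVISOR_grant_table_op(
	                GNTTABOP_unmap_grant_ref, unmap, invcount)))
	    BUG();

	/* After (same behaviour; the hypercall still runs): */
	BUG_ON(HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount));
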
     9.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h	Thu Sep 22 16:05:44 2005 +0100
     9.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h	Thu Sep 22 16:12:14 2005 +0100
     9.3 @@ -31,39 +31,39 @@
     9.4  #endif
     9.5  
     9.6  struct vbd {
     9.7 -    blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
     9.8 -    unsigned char  readonly;    /* Non-zero -> read-only */
     9.9 -    unsigned char  type;        /* VDISK_xxx */
    9.10 -    u32            pdevice;     /* phys device that this vbd maps to */
    9.11 -    struct block_device *bdev;
    9.12 +	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
    9.13 +	unsigned char  readonly;    /* Non-zero -> read-only */
    9.14 +	unsigned char  type;        /* VDISK_xxx */
    9.15 +	u32            pdevice;     /* phys device that this vbd maps to */
    9.16 +	struct block_device *bdev;
    9.17  }; 
    9.18  
    9.19  typedef struct blkif_st {
    9.20 -    /* Unique identifier for this interface. */
    9.21 -    domid_t           domid;
    9.22 -    unsigned int      handle;
    9.23 -    /* Physical parameters of the comms window. */
    9.24 -    unsigned int      evtchn;
    9.25 -    unsigned int      remote_evtchn;
    9.26 -    /* Comms information. */
    9.27 -    blkif_back_ring_t blk_ring;
    9.28 -    struct vm_struct *blk_ring_area;
    9.29 -    /* VBDs attached to this interface. */
    9.30 -    struct vbd        vbd;
    9.31 -    /* Private fields. */
    9.32 -    enum { DISCONNECTED, CONNECTED } status;
    9.33 +	/* Unique identifier for this interface. */
    9.34 +	domid_t           domid;
    9.35 +	unsigned int      handle;
    9.36 +	/* Physical parameters of the comms window. */
    9.37 +	unsigned int      evtchn;
    9.38 +	unsigned int      remote_evtchn;
    9.39 +	/* Comms information. */
    9.40 +	blkif_back_ring_t blk_ring;
    9.41 +	struct vm_struct *blk_ring_area;
    9.42 +	/* VBDs attached to this interface. */
    9.43 +	struct vbd        vbd;
    9.44 +	/* Private fields. */
    9.45 +	enum { DISCONNECTED, CONNECTED } status;
    9.46  #ifdef CONFIG_XEN_BLKDEV_TAP_BE
    9.47 -    /* Is this a blktap frontend */
    9.48 -    unsigned int     is_blktap;
    9.49 +	/* Is this a blktap frontend */
    9.50 +	unsigned int     is_blktap;
    9.51  #endif
    9.52 -    struct list_head blkdev_list;
    9.53 -    spinlock_t       blk_ring_lock;
    9.54 -    atomic_t         refcnt;
    9.55 +	struct list_head blkdev_list;
    9.56 +	spinlock_t       blk_ring_lock;
    9.57 +	atomic_t         refcnt;
    9.58  
    9.59 -    struct work_struct free_work;
    9.60 +	struct work_struct free_work;
    9.61  
    9.62 -    u16         shmem_handle;
    9.63 -    grant_ref_t shmem_ref;
    9.64 +	u16         shmem_handle;
    9.65 +	grant_ref_t shmem_ref;
    9.66  } blkif_t;
    9.67  
    9.68  blkif_t *alloc_blkif(domid_t domid);
    9.69 @@ -71,11 +71,11 @@ void free_blkif_callback(blkif_t *blkif)
    9.70  int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
    9.71  
    9.72  #define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
    9.73 -#define blkif_put(_b)                             \
    9.74 -    do {                                          \
    9.75 -        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
    9.76 -            free_blkif_callback(_b);		  \
    9.77 -    } while (0)
    9.78 +#define blkif_put(_b)					\
    9.79 +	do {						\
    9.80 +		if (atomic_dec_and_test(&(_b)->refcnt))	\
    9.81 +			free_blkif_callback(_b);	\
    9.82 +	} while (0)
    9.83  
    9.84  /* Create a vbd. */
    9.85  int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, u32 pdevice,
    9.86 @@ -87,10 +87,10 @@ unsigned int vbd_info(struct vbd *vbd);
    9.87  unsigned long vbd_secsize(struct vbd *vbd);
    9.88  
    9.89  struct phys_req {
    9.90 -    unsigned short       dev;
    9.91 -    unsigned short       nr_sects;
    9.92 -    struct block_device *bdev;
    9.93 -    blkif_sector_t       sector_number;
    9.94 +	unsigned short       dev;
    9.95 +	unsigned short       nr_sects;
    9.96 +	struct block_device *bdev;
    9.97 +	blkif_sector_t       sector_number;
    9.98  };
    9.99  
   9.100  int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
   9.101 @@ -104,3 +104,13 @@ void blkif_xenbus_init(void);
   9.102  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
   9.103  
   9.104  #endif /* __BLKIF__BACKEND__COMMON_H__ */
   9.105 +
   9.106 +/*
   9.107 + * Local variables:
   9.108 + *  c-file-style: "linux"
   9.109 + *  indent-tabs-mode: t
   9.110 + *  c-indent-level: 8
   9.111 + *  c-basic-offset: 8
   9.112 + *  tab-width: 8
   9.113 + * End:
   9.114 + */
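
The blkif_get()/blkif_put() macros above implement the interface's
reference count; the final put defers teardown to free_blkif_callback()
(defined in interface.c below). The scheduler loop in blkback.c pairs
them around each batch, a pattern that condenses to the following
sketch (all names are taken from the hunks above):

	blkif_get(blkif);                       /* pin across the batch   */
	remove_from_blkdev_list(blkif);         /* drops the list's ref   */
	if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
		add_to_blkdev_list_tail(blkif); /* requeue takes a ref    */
	blkif_put(blkif);                       /* may schedule the free  */
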
    10.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Sep 22 16:05:44 2005 +0100
    10.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Sep 22 16:12:14 2005 +0100
    10.3 @@ -13,134 +13,144 @@ static kmem_cache_t *blkif_cachep;
    10.4  
    10.5  blkif_t *alloc_blkif(domid_t domid)
    10.6  {
    10.7 -    blkif_t *blkif;
    10.8 -
    10.9 -    blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
   10.10 -    if (!blkif)
   10.11 -	    return ERR_PTR(-ENOMEM);
   10.12 +	blkif_t *blkif;
   10.13  
   10.14 -    memset(blkif, 0, sizeof(*blkif));
   10.15 -    blkif->domid = domid;
   10.16 -    blkif->status = DISCONNECTED;
   10.17 -    spin_lock_init(&blkif->blk_ring_lock);
   10.18 -    atomic_set(&blkif->refcnt, 1);
   10.19 +	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
   10.20 +	if (!blkif)
   10.21 +		return ERR_PTR(-ENOMEM);
   10.22  
   10.23 -    return blkif;
   10.24 +	memset(blkif, 0, sizeof(*blkif));
   10.25 +	blkif->domid = domid;
   10.26 +	blkif->status = DISCONNECTED;
   10.27 +	spin_lock_init(&blkif->blk_ring_lock);
   10.28 +	atomic_set(&blkif->refcnt, 1);
   10.29 +
   10.30 +	return blkif;
   10.31  }
   10.32  
   10.33  static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
   10.34  {
   10.35 -    struct gnttab_map_grant_ref op;
   10.36 -
   10.37 -    op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
   10.38 -    op.flags     = GNTMAP_host_map;
   10.39 -    op.ref       = shared_page;
   10.40 -    op.dom       = blkif->domid;
   10.41 +	struct gnttab_map_grant_ref op;
   10.42  
   10.43 -    lock_vm_area(blkif->blk_ring_area);
   10.44 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
   10.45 -    unlock_vm_area(blkif->blk_ring_area);
   10.46 +	op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
   10.47 +	op.flags     = GNTMAP_host_map;
   10.48 +	op.ref       = shared_page;
   10.49 +	op.dom       = blkif->domid;
   10.50  
   10.51 -    if (op.handle < 0) {
   10.52 -	DPRINTK(" Grant table operation failure !\n");
   10.53 -	return op.handle;
   10.54 -    }
   10.55 +	lock_vm_area(blkif->blk_ring_area);
   10.56 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
   10.57 +	unlock_vm_area(blkif->blk_ring_area);
   10.58  
   10.59 -    blkif->shmem_ref = shared_page;
   10.60 -    blkif->shmem_handle = op.handle;
   10.61 +	if (op.handle < 0) {
   10.62 +		DPRINTK(" Grant table operation failure !\n");
   10.63 +		return op.handle;
   10.64 +	}
   10.65  
   10.66 -    return 0;
   10.67 +	blkif->shmem_ref = shared_page;
   10.68 +	blkif->shmem_handle = op.handle;
   10.69 +
   10.70 +	return 0;
   10.71  }
   10.72  
   10.73  static void unmap_frontend_page(blkif_t *blkif)
   10.74  {
   10.75 -    struct gnttab_unmap_grant_ref op;
   10.76 +	struct gnttab_unmap_grant_ref op;
   10.77  
   10.78 -    op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
   10.79 -    op.handle       = blkif->shmem_handle;
   10.80 -    op.dev_bus_addr = 0;
   10.81 +	op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
   10.82 +	op.handle       = blkif->shmem_handle;
   10.83 +	op.dev_bus_addr = 0;
   10.84  
   10.85 -    lock_vm_area(blkif->blk_ring_area);
   10.86 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   10.87 -    unlock_vm_area(blkif->blk_ring_area);
   10.88 +	lock_vm_area(blkif->blk_ring_area);
   10.89 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   10.90 +	unlock_vm_area(blkif->blk_ring_area);
   10.91  }
   10.92  
   10.93  int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
   10.94  {
   10.95 -    blkif_sring_t *sring;
   10.96 -    evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
   10.97 -    int err;
   10.98 -
   10.99 -    BUG_ON(blkif->remote_evtchn);
  10.100 -
  10.101 -    if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
  10.102 -	return -ENOMEM;
  10.103 -
  10.104 -    err = map_frontend_page(blkif, shared_page);
  10.105 -    if (err) {
  10.106 -        free_vm_area(blkif->blk_ring_area);
  10.107 -	return err;
  10.108 -    }
  10.109 +	blkif_sring_t *sring;
  10.110 +	evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
  10.111 +	int err;
  10.112  
  10.113 -    op.u.bind_interdomain.dom1 = DOMID_SELF;
  10.114 -    op.u.bind_interdomain.dom2 = blkif->domid;
  10.115 -    op.u.bind_interdomain.port1 = 0;
  10.116 -    op.u.bind_interdomain.port2 = evtchn;
  10.117 -    err = HYPERVISOR_event_channel_op(&op);
  10.118 -    if (err) {
  10.119 -	unmap_frontend_page(blkif);
  10.120 -        free_vm_area(blkif->blk_ring_area);
  10.121 -	return err;
  10.122 -    }
  10.123 +	BUG_ON(blkif->remote_evtchn);
  10.124  
  10.125 -    blkif->evtchn = op.u.bind_interdomain.port1;
  10.126 -    blkif->remote_evtchn = evtchn;
  10.127 +	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
  10.128 +		return -ENOMEM;
  10.129  
  10.130 -    sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
  10.131 -    SHARED_RING_INIT(sring);
  10.132 -    BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
  10.133 +	err = map_frontend_page(blkif, shared_page);
  10.134 +	if (err) {
  10.135 +		free_vm_area(blkif->blk_ring_area);
  10.136 +		return err;
  10.137 +	}
  10.138  
  10.139 -    bind_evtchn_to_irqhandler(blkif->evtchn, blkif_be_int, 0, "blkif-backend",
  10.140 -			      blkif);
  10.141 -    blkif->status = CONNECTED;
  10.142 +	op.u.bind_interdomain.dom1 = DOMID_SELF;
  10.143 +	op.u.bind_interdomain.dom2 = blkif->domid;
  10.144 +	op.u.bind_interdomain.port1 = 0;
  10.145 +	op.u.bind_interdomain.port2 = evtchn;
  10.146 +	err = HYPERVISOR_event_channel_op(&op);
  10.147 +	if (err) {
  10.148 +		unmap_frontend_page(blkif);
  10.149 +		free_vm_area(blkif->blk_ring_area);
  10.150 +		return err;
  10.151 +	}
  10.152  
  10.153 -    return 0;
  10.154 +	blkif->evtchn = op.u.bind_interdomain.port1;
  10.155 +	blkif->remote_evtchn = evtchn;
  10.156 +
  10.157 +	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
  10.158 +	SHARED_RING_INIT(sring);
  10.159 +	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
  10.160 +
  10.161 +	bind_evtchn_to_irqhandler(
  10.162 +		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
  10.163 +	blkif->status = CONNECTED;
  10.164 +
  10.165 +	return 0;
  10.166  }
  10.167  
  10.168  static void free_blkif(void *arg)
  10.169  {
  10.170 -    evtchn_op_t op = { .cmd = EVTCHNOP_close };
  10.171 -    blkif_t *blkif = (blkif_t *)arg;
  10.172 -
  10.173 -    op.u.close.port = blkif->evtchn;
  10.174 -    op.u.close.dom = DOMID_SELF;
  10.175 -    HYPERVISOR_event_channel_op(&op);
  10.176 -    op.u.close.port = blkif->remote_evtchn;
  10.177 -    op.u.close.dom = blkif->domid;
  10.178 -    HYPERVISOR_event_channel_op(&op);
  10.179 +	evtchn_op_t op = { .cmd = EVTCHNOP_close };
  10.180 +	blkif_t *blkif = (blkif_t *)arg;
  10.181  
  10.182 -    vbd_free(&blkif->vbd);
  10.183 -
  10.184 -    if (blkif->evtchn)
  10.185 -        unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
  10.186 +	op.u.close.port = blkif->evtchn;
  10.187 +	op.u.close.dom = DOMID_SELF;
  10.188 +	HYPERVISOR_event_channel_op(&op);
  10.189 +	op.u.close.port = blkif->remote_evtchn;
  10.190 +	op.u.close.dom = blkif->domid;
  10.191 +	HYPERVISOR_event_channel_op(&op);
  10.192  
  10.193 -    if (blkif->blk_ring.sring) {
  10.194 -	unmap_frontend_page(blkif);
  10.195 -        free_vm_area(blkif->blk_ring_area);
  10.196 -	blkif->blk_ring.sring = NULL;
  10.197 -    }
  10.198 +	vbd_free(&blkif->vbd);
  10.199  
  10.200 -    kmem_cache_free(blkif_cachep, blkif);
  10.201 +	if (blkif->evtchn)
  10.202 +		unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
  10.203 +
  10.204 +	if (blkif->blk_ring.sring) {
  10.205 +		unmap_frontend_page(blkif);
  10.206 +		free_vm_area(blkif->blk_ring_area);
  10.207 +		blkif->blk_ring.sring = NULL;
  10.208 +	}
  10.209 +
  10.210 +	kmem_cache_free(blkif_cachep, blkif);
  10.211  }
  10.212  
  10.213  void free_blkif_callback(blkif_t *blkif)
  10.214  {
  10.215 -    INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
  10.216 -    schedule_work(&blkif->free_work);
  10.217 +	INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
  10.218 +	schedule_work(&blkif->free_work);
  10.219  }
  10.220  
  10.221  void __init blkif_interface_init(void)
  10.222  {
  10.223 -    blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
  10.224 -                                     0, 0, NULL, NULL);
  10.225 +	blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
  10.226 +					 0, 0, NULL, NULL);
  10.227  }
  10.228 +
  10.229 +/*
  10.230 + * Local variables:
  10.231 + *  c-file-style: "linux"
  10.232 + *  indent-tabs-mode: t
  10.233 + *  c-indent-level: 8
  10.234 + *  c-basic-offset: 8
  10.235 + *  tab-width: 8
  10.236 + * End:
  10.237 + */
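
blkif_map() above performs the interdomain event-channel handshake
before wiring up the shared ring. Condensed, with commentary (reading
port1 = 0 as "let Xen allocate any local port" is an assumption from
the surrounding code, not something the patch states; error handling
is omitted for brevity):

	evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
	int err;

	op.u.bind_interdomain.dom1  = DOMID_SELF;    /* this backend end  */
	op.u.bind_interdomain.dom2  = blkif->domid;  /* frontend domain   */
	op.u.bind_interdomain.port1 = 0;             /* any local port    */
	op.u.bind_interdomain.port2 = evtchn;        /* frontend's port   */
	err = HYPERVISOR_event_channel_op(&op);      /* 0 on success      */

	blkif->evtchn = op.u.bind_interdomain.port1; /* port Xen chose    */
	bind_evtchn_to_irqhandler(blkif->evtchn, blkif_be_int, 0,
				  "blkif-backend", blkif);

Note that free_blkif() undoes this from a workqueue via
free_blkif_callback(), presumably because the final blkif_put() can
happen outside process context.
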
    11.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c	Thu Sep 22 16:05:44 2005 +0100
    11.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c	Thu Sep 22 16:12:14 2005 +0100
    11.3 @@ -11,10 +11,10 @@
    11.4  
    11.5  static inline dev_t vbd_map_devnum(u32 cookie)
    11.6  {
    11.7 -    return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
    11.8 +	return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
    11.9  }
   11.10 -#define vbd_sz(_v)   ((_v)->bdev->bd_part ? \
   11.11 -    (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
   11.12 +#define vbd_sz(_v)   ((_v)->bdev->bd_part ?				\
   11.13 +	(_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
   11.14  #define bdev_put(_b) blkdev_put(_b)
   11.15  
   11.16  unsigned long vbd_size(struct vbd *vbd)
   11.17 @@ -35,63 +35,73 @@ unsigned long vbd_secsize(struct vbd *vb
   11.18  int vbd_create(blkif_t *blkif, blkif_vdev_t handle,
   11.19  	       u32 pdevice, int readonly)
   11.20  {
   11.21 -    struct vbd *vbd;
   11.22 -
   11.23 -    vbd = &blkif->vbd;
   11.24 -    vbd->handle   = handle; 
   11.25 -    vbd->readonly = readonly;
   11.26 -    vbd->type     = 0;
   11.27 -
   11.28 -    vbd->pdevice  = pdevice;
   11.29 +	struct vbd *vbd;
   11.30  
   11.31 -    vbd->bdev = open_by_devnum(
   11.32 -        vbd_map_devnum(vbd->pdevice),
   11.33 -        vbd->readonly ? FMODE_READ : FMODE_WRITE);
   11.34 -    if ( IS_ERR(vbd->bdev) )
   11.35 -    {
   11.36 -        DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
   11.37 -        return -ENOENT;
   11.38 -    }
   11.39 +	vbd = &blkif->vbd;
   11.40 +	vbd->handle   = handle; 
   11.41 +	vbd->readonly = readonly;
   11.42 +	vbd->type     = 0;
   11.43  
   11.44 -    if ( (vbd->bdev->bd_disk == NULL) )
   11.45 -    {
   11.46 -        DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
   11.47 -	vbd_free(vbd);
   11.48 -        return -ENOENT;
   11.49 -    }
   11.50 +	vbd->pdevice  = pdevice;
   11.51  
   11.52 -    if ( vbd->bdev->bd_disk->flags & GENHD_FL_CD )
   11.53 -        vbd->type |= VDISK_CDROM;
   11.54 -    if ( vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE )
   11.55 -        vbd->type |= VDISK_REMOVABLE;
   11.56 +	vbd->bdev = open_by_devnum(
   11.57 +		vbd_map_devnum(vbd->pdevice),
   11.58 +		vbd->readonly ? FMODE_READ : FMODE_WRITE);
   11.59 +	if (IS_ERR(vbd->bdev)) {
   11.60 +		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
   11.61 +			vbd->pdevice);
   11.62 +		return -ENOENT;
   11.63 +	}
   11.64  
   11.65 -    DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
   11.66 -            handle, blkif->domid);
   11.67 -    return 0;
   11.68 +	if (vbd->bdev->bd_disk == NULL) {
   11.69 +		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
   11.70 +			vbd->pdevice);
   11.71 +		vbd_free(vbd);
   11.72 +		return -ENOENT;
   11.73 +	}
   11.74 +
   11.75 +	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
   11.76 +		vbd->type |= VDISK_CDROM;
   11.77 +	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
   11.78 +		vbd->type |= VDISK_REMOVABLE;
   11.79 +
   11.80 +	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
   11.81 +		handle, blkif->domid);
   11.82 +	return 0;
   11.83  }
   11.84  
   11.85  void vbd_free(struct vbd *vbd)
   11.86  {
   11.87 -    if (vbd->bdev)
   11.88 -	bdev_put(vbd->bdev);
   11.89 -    vbd->bdev = NULL;
   11.90 +	if (vbd->bdev)
   11.91 +		bdev_put(vbd->bdev);
   11.92 +	vbd->bdev = NULL;
   11.93  }
   11.94  
   11.95  int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
   11.96  {
   11.97 -    struct vbd *vbd = &blkif->vbd;
   11.98 -    int rc = -EACCES;
   11.99 -
  11.100 -    if ((operation == WRITE) && vbd->readonly)
  11.101 -        goto out;
  11.102 +	struct vbd *vbd = &blkif->vbd;
  11.103 +	int rc = -EACCES;
  11.104  
  11.105 -    if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
  11.106 -        goto out;
  11.107 +	if ((operation == WRITE) && vbd->readonly)
  11.108 +		goto out;
  11.109  
  11.110 -    req->dev  = vbd->pdevice;
  11.111 -    req->bdev = vbd->bdev;
  11.112 -    rc = 0;
  11.113 +	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
  11.114 +		goto out;
  11.115 +
  11.116 +	req->dev  = vbd->pdevice;
  11.117 +	req->bdev = vbd->bdev;
  11.118 +	rc = 0;
  11.119  
  11.120   out:
  11.121 -    return rc;
  11.122 +	return rc;
  11.123  }
  11.124 +
  11.125 +/*
  11.126 + * Local variables:
  11.127 + *  c-file-style: "linux"
  11.128 + *  indent-tabs-mode: t
  11.129 + *  c-indent-level: 8
  11.130 + *  c-basic-offset: 8
  11.131 + *  tab-width: 8
  11.132 + * End:
  11.133 + */
    12.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Thu Sep 22 16:05:44 2005 +0100
    12.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Thu Sep 22 16:12:14 2005 +0100
    12.3 @@ -124,7 +124,7 @@ static void frontend_changed(struct xenb
    12.4  
    12.5  	return;
    12.6  
    12.7 -abort:
    12.8 + abort:
    12.9  	xenbus_transaction_end(1);
   12.10  }
   12.11  
   12.12 @@ -278,3 +278,13 @@ void blkif_xenbus_init(void)
   12.13  {
   12.14  	xenbus_register_backend(&blkback);
   12.15  }
   12.16 +
   12.17 +/*
   12.18 + * Local variables:
   12.19 + *  c-file-style: "linux"
   12.20 + *  indent-tabs-mode: t
   12.21 + *  c-indent-level: 8
   12.22 + *  c-basic-offset: 8
   12.23 + *  tab-width: 8
   12.24 + * End:
   12.25 + */
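
The relabelled abort: exit above is the abort leg of a xenbus transaction. A hedged sketch of the shape frontend_changed() follows, with the driver's actual xenstore traffic replaced by a hypothetical do_writes() helper; the xenbus_transaction_start() signature and the -EAGAIN retry convention are assumptions based on this tree, while the non-zero "abort" argument to xenbus_transaction_end() is taken from the hunk itself:

static void frontend_changed_sketch(void)
{
	int err;

again:
	if (xenbus_transaction_start())
		return;

	if (do_writes())		/* hypothetical stand-in for the
					 * driver's reads and writes */
		goto abort;

	err = xenbus_transaction_end(0);	/* 0 commits */
	if (err == -EAGAIN)
		goto again;		/* transaction raced; retry */
	return;

 abort:
	xenbus_transaction_end(1);	/* non-zero aborts */
}
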
    13.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Thu Sep 22 16:05:44 2005 +0100
    13.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Thu Sep 22 16:12:14 2005 +0100
    13.3 @@ -146,4 +146,15 @@ extern void do_blkif_request (request_qu
    13.4  int xlvbd_add(blkif_sector_t capacity, int device,
    13.5  	      u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
    13.6  void xlvbd_del(struct blkfront_info *info);
    13.7 +
    13.8  #endif /* __XEN_DRIVERS_BLOCK_H__ */
    13.9 +
   13.10 +/*
   13.11 + * Local variables:
   13.12 + *  c-file-style: "linux"
   13.13 + *  indent-tabs-mode: t
   13.14 + *  c-indent-level: 8
   13.15 + *  c-basic-offset: 8
   13.16 + *  tab-width: 8
   13.17 + * End:
   13.18 + */
    14.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c	Thu Sep 22 16:05:44 2005 +0100
    14.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c	Thu Sep 22 16:12:14 2005 +0100
    14.3 @@ -65,7 +65,7 @@ static struct xlbd_type_info xlbd_vbd_ty
    14.4  };
    14.5  
    14.6  static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
    14.7 -					  NUM_VBD_MAJORS];
    14.8 +					 NUM_VBD_MAJORS];
    14.9  
   14.10  #define XLBD_MAJOR_IDE_START	0
   14.11  #define XLBD_MAJOR_SCSI_START	(NUM_IDE_MAJORS)
   14.12 @@ -309,3 +309,13 @@ xlvbd_del(struct blkfront_info *info)
   14.13  
   14.14  	bdput(bd);
   14.15  }
   14.16 +
   14.17 +/*
   14.18 + * Local variables:
   14.19 + *  c-file-style: "linux"
   14.20 + *  indent-tabs-mode: t
   14.21 + *  c-indent-level: 8
   14.22 + *  c-basic-offset: 8
   14.23 + *  tab-width: 8
   14.24 + * End:
   14.25 + */
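
major_info, realigned above, is one flat array carved into per-transport ranges by the XLBD_MAJOR_*_START offsets. A sketch of that partitioning with illustrative NUM_* sizes; XLBD_MAJOR_VBD_START is assumed by analogy with the two starts visible in the hunk, and vbd.c defines the real constants:

#include <stdio.h>

/* Illustrative sizes only. */
enum { NUM_IDE_MAJORS = 10, NUM_SCSI_MAJORS = 9, NUM_VBD_MAJORS = 1 };

#define XLBD_MAJOR_IDE_START	0
#define XLBD_MAJOR_SCSI_START	(NUM_IDE_MAJORS)
#define XLBD_MAJOR_VBD_START	(NUM_IDE_MAJORS + NUM_SCSI_MAJORS)

static void *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS + NUM_VBD_MAJORS];

int main(void)
{
	/* Each transport indexes its own contiguous slice of the array. */
	printf("ide  slots [%d..%d)\n", XLBD_MAJOR_IDE_START, XLBD_MAJOR_SCSI_START);
	printf("scsi slots [%d..%d)\n", XLBD_MAJOR_SCSI_START, XLBD_MAJOR_VBD_START);
	printf("vbd  slots [%d..%zu)\n", XLBD_MAJOR_VBD_START,
	       sizeof(major_info) / sizeof(major_info[0]));
	return 0;
}
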
    15.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Thu Sep 22 16:05:44 2005 +0100
    15.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Thu Sep 22 16:12:14 2005 +0100
    15.3 @@ -4,7 +4,6 @@
    15.4   * This is a modified version of the block backend driver that remaps requests
    15.5   * to a user-space memory region.  It is intended to be used to write 
    15.6   * application-level servers that provide block interfaces to client VMs.
    15.7 - * 
    15.8   */
    15.9  
   15.10  #include <linux/kernel.h>
   15.11 @@ -67,20 +66,19 @@ static int blktap_read_ufe_ring(void);
   15.12  
   15.13  static inline int BLKTAP_MODE_VALID(unsigned long arg)
   15.14  {
   15.15 -    return (
   15.16 -        ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
   15.17 -        ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
   15.18 -        ( arg == BLKTAP_MODE_INTERPOSE    ) );
   15.19 +	return ((arg == BLKTAP_MODE_PASSTHROUGH) ||
   15.20 +		(arg == BLKTAP_MODE_INTERCEPT_FE) ||
   15.21 +		(arg == BLKTAP_MODE_INTERPOSE));
   15.22  /*
   15.23 -    return (
   15.24 -        ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
   15.25 -        ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
   15.26 -        ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
   15.27 -        ( arg == BLKTAP_MODE_INTERPOSE    ) ||
   15.28 -        ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
   15.29 -        ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
   15.30 -        ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
   15.31 -        );
   15.32 +  return (
   15.33 +  ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
   15.34 +  ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
   15.35 +  ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
   15.36 +  ( arg == BLKTAP_MODE_INTERPOSE    ) ||
   15.37 +  ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
   15.38 +  ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
   15.39 +  ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
   15.40 +  );
   15.41  */
   15.42  }
   15.43  
   15.44 @@ -110,14 +108,12 @@ unsigned long mmap_vstart;  /* Kernel pa
   15.45  unsigned long rings_vstart; /* start of mmaped vma               */
   15.46  unsigned long user_vstart;  /* start of user mappings            */
   15.47  
   15.48 -#define MMAP_PAGES                                              \
   15.49 -    (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
   15.50 -#define MMAP_VADDR(_start, _req,_seg)                           \
   15.51 -    (_start +                                                   \
   15.52 -     ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +    \
   15.53 -     ((_seg) * PAGE_SIZE))
   15.54 -
   15.55 -
   15.56 +#define MMAP_PAGES						\
   15.57 +	(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
   15.58 +#define MMAP_VADDR(_start, _req,_seg)					\
   15.59 +	(_start +							\
   15.60 +	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
   15.61 +	 ((_seg) * PAGE_SIZE))
   15.62  
   15.63  /*
   15.64   * Each outstanding request that we've passed to the lower device layers has a 
   15.65 @@ -126,12 +122,12 @@ unsigned long user_vstart;  /* start of 
   15.66   * response queued for it, with the saved 'id' passed back.
   15.67   */
   15.68  typedef struct {
   15.69 -    blkif_t       *blkif;
   15.70 -    unsigned long  id;
   15.71 -    int            nr_pages;
   15.72 -    atomic_t       pendcnt;
   15.73 -    unsigned short operation;
   15.74 -    int            status;
   15.75 +	blkif_t       *blkif;
   15.76 +	unsigned long  id;
   15.77 +	int            nr_pages;
   15.78 +	atomic_t       pendcnt;
   15.79 +	unsigned short operation;
   15.80 +	int            status;
   15.81  } pending_req_t;
   15.82  
   15.83  /*
   15.84 @@ -156,17 +152,17 @@ static PEND_RING_IDX pending_prod, pendi
   15.85  
   15.86  static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
   15.87  {
   15.88 -    return ( (fe_dom << 16) | MASK_PEND_IDX(idx) );
   15.89 +	return ((fe_dom << 16) | MASK_PEND_IDX(idx));
   15.90  }
   15.91  
   15.92  extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id) 
   15.93  { 
   15.94 -    return (PEND_RING_IDX)( id & 0x0000ffff );
   15.95 +	return (PEND_RING_IDX)(id & 0x0000ffff);
   15.96  }
   15.97  
   15.98  extern inline domid_t ID_TO_DOM(unsigned long id) 
   15.99  { 
  15.100 -    return (domid_t)(id >> 16); 
  15.101 +	return (domid_t)(id >> 16); 
  15.102  }
  15.103  
  15.104  
  15.105 @@ -181,8 +177,8 @@ extern inline domid_t ID_TO_DOM(unsigned
  15.106   */
  15.107  struct grant_handle_pair
  15.108  {
  15.109 -    u16  kernel;
  15.110 -    u16  user;
  15.111 +	u16  kernel;
  15.112 +	u16  user;
  15.113  };
  15.114  static struct grant_handle_pair pending_grant_handles[MMAP_PAGES];
  15.115  #define pending_handle(_idx, _i) \
  15.116 @@ -199,21 +195,20 @@ static struct grant_handle_pair pending_
  15.117   */
  15.118  
  15.119  static struct page *blktap_nopage(struct vm_area_struct *vma,
  15.120 -                                             unsigned long address,
  15.121 -                                             int *type)
  15.122 +				  unsigned long address,
  15.123 +				  int *type)
  15.124  {
  15.125 -    /*
  15.126 -     * if the page has not been mapped in by the driver then generate
  15.127 -     * a SIGBUS to the domain.
  15.128 -     */
  15.129 +	/*
  15.130 +	 * if the page has not been mapped in by the driver then generate
  15.131 +	 * a SIGBUS to the domain.
  15.132 +	 */
  15.133 +	force_sig(SIGBUS, current);
  15.134  
  15.135 -    force_sig(SIGBUS, current);
  15.136 -
  15.137 -    return 0;
  15.138 +	return 0;
  15.139  }
  15.140  
  15.141  struct vm_operations_struct blktap_vm_ops = {
  15.142 -    nopage:   blktap_nopage,
  15.143 +	nopage:   blktap_nopage,
  15.144  };
  15.145  
  15.146  /******************************************************************
  15.147 @@ -222,44 +217,45 @@ struct vm_operations_struct blktap_vm_op
  15.148  
  15.149  static int blktap_open(struct inode *inode, struct file *filp)
  15.150  {
  15.151 -    blkif_sring_t *sring;
  15.152 -    
  15.153 -    if ( test_and_set_bit(0, &blktap_dev_inuse) )
  15.154 -        return -EBUSY;
  15.155 +	blkif_sring_t *sring;
  15.156 +
  15.157 +	if (test_and_set_bit(0, &blktap_dev_inuse))
  15.158 +		return -EBUSY;
  15.159      
  15.160 -    /* Allocate the fe ring. */
  15.161 -    sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
  15.162 -    if (sring == NULL)
  15.163 -        goto fail_nomem;
  15.164 +	/* Allocate the fe ring. */
  15.165 +	sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
  15.166 +	if (sring == NULL)
  15.167 +		goto fail_nomem;
  15.168  
  15.169 -    SetPageReserved(virt_to_page(sring));
  15.170 +	SetPageReserved(virt_to_page(sring));
  15.171      
  15.172 -    SHARED_RING_INIT(sring);
  15.173 -    FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
  15.174 +	SHARED_RING_INIT(sring);
  15.175 +	FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
  15.176  
  15.177 -    return 0;
  15.178 +	return 0;
  15.179  
  15.180   fail_nomem:
  15.181 -    return -ENOMEM;
  15.182 +	return -ENOMEM;
  15.183  }
  15.184  
  15.185  static int blktap_release(struct inode *inode, struct file *filp)
  15.186  {
  15.187 -    blktap_dev_inuse = 0;
  15.188 -    blktap_ring_ok = 0;
  15.189 -
  15.190 -    /* Free the ring page. */
  15.191 -    ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
  15.192 -    free_page((unsigned long) blktap_ufe_ring.sring);
  15.193 +	blktap_dev_inuse = 0;
  15.194 +	blktap_ring_ok = 0;
  15.195  
  15.196 -    /* Clear any active mappings and free foreign map table */
  15.197 -    if (blktap_vma != NULL) {
  15.198 -        zap_page_range(blktap_vma, blktap_vma->vm_start, 
  15.199 -                       blktap_vma->vm_end - blktap_vma->vm_start, NULL);
  15.200 -        blktap_vma = NULL;
  15.201 -    }
  15.202 +	/* Free the ring page. */
  15.203 +	ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
  15.204 +	free_page((unsigned long) blktap_ufe_ring.sring);
  15.205  
  15.206 -    return 0;
  15.207 +	/* Clear any active mappings and free foreign map table */
  15.208 +	if (blktap_vma != NULL) {
  15.209 +		zap_page_range(
  15.210 +			blktap_vma, blktap_vma->vm_start, 
  15.211 +			blktap_vma->vm_end - blktap_vma->vm_start, NULL);
  15.212 +		blktap_vma = NULL;
  15.213 +	}
  15.214 +
  15.215 +	return 0;
  15.216  }
  15.217  
  15.218  
  15.219 @@ -283,128 +279,124 @@ static int blktap_release(struct inode *
  15.220   */
  15.221  static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
  15.222  {
  15.223 -    int size;
  15.224 -    struct page **map;
  15.225 -    int i;
  15.226 -
  15.227 -    DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
  15.228 -           vma->vm_start, vma->vm_end);
  15.229 -
  15.230 -    vma->vm_flags |= VM_RESERVED;
  15.231 -    vma->vm_ops = &blktap_vm_ops;
  15.232 -
  15.233 -    size = vma->vm_end - vma->vm_start;
  15.234 -    if ( size != ( (MMAP_PAGES + RING_PAGES) << PAGE_SHIFT ) ) {
  15.235 -        printk(KERN_INFO 
  15.236 -               "blktap: you _must_ map exactly %d pages!\n",
  15.237 -               MMAP_PAGES + RING_PAGES);
  15.238 -        return -EAGAIN;
  15.239 -    }
  15.240 -
  15.241 -    size >>= PAGE_SHIFT;
  15.242 -    DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
  15.243 -    
  15.244 -    rings_vstart = vma->vm_start;
  15.245 -    user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
  15.246 -    
  15.247 -    /* Map the ring pages to the start of the region and reserve it. */
  15.248 -
  15.249 -    /* not sure if I really need to do this... */
  15.250 -    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  15.251 +	int size;
  15.252 +	struct page **map;
  15.253 +	int i;
  15.254  
  15.255 -    if (remap_pfn_range(vma, vma->vm_start, 
  15.256 -                         __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
  15.257 -                         PAGE_SIZE, vma->vm_page_prot)) 
  15.258 -    {
  15.259 -        WPRINTK("Mapping user ring failed!\n");
  15.260 -        goto fail;
  15.261 -    }
  15.262 +	DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
  15.263 +		vma->vm_start, vma->vm_end);
  15.264  
  15.265 -    /* Mark this VM as containing foreign pages, and set up mappings. */
  15.266 -    map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
  15.267 -                  * sizeof(struct page_struct*),
  15.268 -                  GFP_KERNEL);
  15.269 -    if (map == NULL) 
  15.270 -    {
  15.271 -        WPRINTK("Couldn't alloc VM_FOREIGH map.\n");
  15.272 -        goto fail;
  15.273 -    }
  15.274 +	vma->vm_flags |= VM_RESERVED;
  15.275 +	vma->vm_ops = &blktap_vm_ops;
  15.276  
  15.277 -    for (i=0; i<((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
  15.278 -        map[i] = NULL;
  15.279 +	size = vma->vm_end - vma->vm_start;
  15.280 +	if (size != ((MMAP_PAGES + RING_PAGES) << PAGE_SHIFT)) {
  15.281 +		printk(KERN_INFO 
  15.282 +		       "blktap: you _must_ map exactly %d pages!\n",
  15.283 +		       MMAP_PAGES + RING_PAGES);
  15.284 +		return -EAGAIN;
  15.285 +	}
  15.286 +
  15.287 +	size >>= PAGE_SHIFT;
  15.288 +	DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
  15.289      
  15.290 -    vma->vm_private_data = map;
  15.291 -    vma->vm_flags |= VM_FOREIGN;
  15.292 -
  15.293 -    blktap_vma = vma;
  15.294 -    blktap_ring_ok = 1;
  15.295 +	rings_vstart = vma->vm_start;
  15.296 +	user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
  15.297 +    
  15.298 +	/* Map the ring pages to the start of the region and reserve it. */
  15.299  
  15.300 -    return 0;
  15.301 +	/* not sure if I really need to do this... */
  15.302 +	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  15.303 +
  15.304 +	if (remap_pfn_range(vma, vma->vm_start, 
  15.305 +			    __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
  15.306 +			    PAGE_SIZE, vma->vm_page_prot)) {
  15.307 +		WPRINTK("Mapping user ring failed!\n");
  15.308 +		goto fail;
  15.309 +	}
  15.310 +
  15.311 +	/* Mark this VM as containing foreign pages, and set up mappings. */
  15.312 +	map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
  15.313 +		      * sizeof(struct page *),
  15.314 +		      GFP_KERNEL);
  15.315 +	if (map == NULL) {
  15.316 +		WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
  15.317 +		goto fail;
  15.318 +	}
  15.319 +
  15.320 +	for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
  15.321 +		map[i] = NULL;
  15.322 +    
  15.323 +	vma->vm_private_data = map;
  15.324 +	vma->vm_flags |= VM_FOREIGN;
  15.325 +
  15.326 +	blktap_vma = vma;
  15.327 +	blktap_ring_ok = 1;
  15.328 +
  15.329 +	return 0;
  15.330   fail:
  15.331 -    /* Clear any active mappings. */
  15.332 -    zap_page_range(vma, vma->vm_start, 
  15.333 -                   vma->vm_end - vma->vm_start, NULL);
  15.334 +	/* Clear any active mappings. */
  15.335 +	zap_page_range(vma, vma->vm_start, 
  15.336 +		       vma->vm_end - vma->vm_start, NULL);
  15.337  
  15.338 -    return -ENOMEM;
  15.339 +	return -ENOMEM;
  15.340  }
  15.341  
  15.342  static int blktap_ioctl(struct inode *inode, struct file *filp,
  15.343                          unsigned int cmd, unsigned long arg)
  15.344  {
  15.345 -    switch(cmd) {
  15.346 -    case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
  15.347 -        return blktap_read_ufe_ring();
  15.348 +	switch(cmd) {
  15.349 +	case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
  15.350 +		return blktap_read_ufe_ring();
  15.351  
  15.352 -    case BLKTAP_IOCTL_SETMODE:
  15.353 -        if (BLKTAP_MODE_VALID(arg)) {
  15.354 -            blktap_mode = arg;
  15.355 -            /* XXX: may need to flush rings here. */
  15.356 -            printk(KERN_INFO "blktap: set mode to %lx\n", arg);
  15.357 -            return 0;
  15.358 -        }
  15.359 -    case BLKTAP_IOCTL_PRINT_IDXS:
  15.360 +	case BLKTAP_IOCTL_SETMODE:
  15.361 +		if (BLKTAP_MODE_VALID(arg)) {
  15.362 +			blktap_mode = arg;
  15.363 +			/* XXX: may need to flush rings here. */
  15.364 +			printk(KERN_INFO "blktap: set mode to %lx\n", arg);
  15.365 +			return 0;
  15.366 +		}
  15.367 +	case BLKTAP_IOCTL_PRINT_IDXS:
  15.368          {
  15.369 -            //print_fe_ring_idxs();
  15.370 -            WPRINTK("User Rings: \n-----------\n");
  15.371 -            WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d "
  15.372 -                            "| req_prod: %2d, rsp_prod: %2d\n",
  15.373 -                            blktap_ufe_ring.rsp_cons,
  15.374 -                            blktap_ufe_ring.req_prod_pvt,
  15.375 -                            blktap_ufe_ring.sring->req_prod,
  15.376 -                            blktap_ufe_ring.sring->rsp_prod);
  15.377 +		//print_fe_ring_idxs();
  15.378 +		WPRINTK("User Rings: \n-----------\n");
  15.379 +		WPRINTK("UF: rsp_cons: %2d, req_prod_pvt: %2d "
  15.380 +			"| req_prod: %2d, rsp_prod: %2d\n",
  15.381 +			blktap_ufe_ring.rsp_cons,
  15.382 +			blktap_ufe_ring.req_prod_pvt,
  15.383 +			blktap_ufe_ring.sring->req_prod,
  15.384 +			blktap_ufe_ring.sring->rsp_prod);
  15.385              
  15.386          }
  15.387 -    }
  15.388 -    return -ENOIOCTLCMD;
  15.389 +	}
  15.390 +	return -ENOIOCTLCMD;
  15.391  }
  15.392  
  15.393  static unsigned int blktap_poll(struct file *file, poll_table *wait)
  15.394  {
  15.395 -        poll_wait(file, &blktap_wait, wait);
  15.396 -        if ( RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring) ) 
  15.397 -        {
  15.398 -            flush_tlb_all();
  15.399 +	poll_wait(file, &blktap_wait, wait);
  15.400 +	if (RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring)) {
  15.401 +		flush_tlb_all();
  15.402 +		RING_PUSH_REQUESTS(&blktap_ufe_ring);
  15.403 +		return POLLIN | POLLRDNORM;
  15.404 +	}
  15.405  
  15.406 -            RING_PUSH_REQUESTS(&blktap_ufe_ring);
  15.407 -            return POLLIN | POLLRDNORM;
  15.408 -        }
  15.409 -
  15.410 -        return 0;
  15.411 +	return 0;
  15.412  }
  15.413  
  15.414  void blktap_kick_user(void)
  15.415  {
  15.416 -    /* blktap_ring->req_prod = blktap_req_prod; */
  15.417 -    wake_up_interruptible(&blktap_wait);
  15.418 +	/* blktap_ring->req_prod = blktap_req_prod; */
  15.419 +	wake_up_interruptible(&blktap_wait);
  15.420  }
  15.421  
  15.422  static struct file_operations blktap_fops = {
  15.423 -    owner:    THIS_MODULE,
  15.424 -    poll:     blktap_poll,
  15.425 -    ioctl:    blktap_ioctl,
  15.426 -    open:     blktap_open,
  15.427 -    release:  blktap_release,
  15.428 -    mmap:     blktap_mmap,
  15.429 +	owner:    THIS_MODULE,
  15.430 +	poll:     blktap_poll,
  15.431 +	ioctl:    blktap_ioctl,
  15.432 +	open:     blktap_open,
  15.433 +	release:  blktap_release,
  15.434 +	mmap:     blktap_mmap,
  15.435  };
  15.436  
  15.437  
  15.438 @@ -417,44 +409,44 @@ static void make_response(blkif_t *blkif
  15.439  
  15.440  static void fast_flush_area(int idx, int nr_pages)
  15.441  {
  15.442 -    struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
  15.443 -    unsigned int i, op = 0;
  15.444 -    struct grant_handle_pair *handle;
  15.445 -    unsigned long ptep;
  15.446 -
  15.447 -    for (i=0; i<nr_pages; i++)
  15.448 -    {
  15.449 -        handle = &pending_handle(idx, i);
  15.450 -        if (!BLKTAP_INVALID_HANDLE(handle))
  15.451 -        {
  15.452 -
  15.453 -            unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
  15.454 -            unmap[op].dev_bus_addr = 0;
  15.455 -            unmap[op].handle = handle->kernel;
  15.456 -            op++;
  15.457 +	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
  15.458 +	unsigned int i, op = 0;
  15.459 +	struct grant_handle_pair *handle;
  15.460 +	unsigned long ptep;
  15.461  
  15.462 -            if (create_lookup_pte_addr(blktap_vma->vm_mm,
  15.463 -                                       MMAP_VADDR(user_vstart, idx, i), 
  15.464 -                                       &ptep) !=0) {
  15.465 -                DPRINTK("Couldn't get a pte addr!\n");
  15.466 -                return;
  15.467 -            }
  15.468 -            unmap[op].host_addr    = ptep;
  15.469 -            unmap[op].dev_bus_addr = 0;
  15.470 -            unmap[op].handle       = handle->user;
  15.471 -            op++;
  15.472 +	for (i = 0; i < nr_pages; i++)
  15.473 +	{
  15.474 +		handle = &pending_handle(idx, i);
  15.475 +		if (BLKTAP_INVALID_HANDLE(handle))
  15.476 +			continue;
  15.477 +
  15.478 +		unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
  15.479 +		unmap[op].dev_bus_addr = 0;
  15.480 +		unmap[op].handle = handle->kernel;
  15.481 +		op++;
  15.482 +
  15.483 +		if (create_lookup_pte_addr(
  15.484 +			blktap_vma->vm_mm,
  15.485 +			MMAP_VADDR(user_vstart, idx, i), 
  15.486 +			&ptep) != 0) {
  15.487 +			DPRINTK("Couldn't get a pte addr!\n");
  15.488 +			return;
  15.489 +		}
  15.490 +		unmap[op].host_addr    = ptep;
  15.491 +		unmap[op].dev_bus_addr = 0;
  15.492 +		unmap[op].handle       = handle->user;
  15.493 +		op++;
  15.494              
  15.495 -           BLKTAP_INVALIDATE_HANDLE(handle);
  15.496 -        }
  15.497 -    }
  15.498 -    if ( unlikely(HYPERVISOR_grant_table_op(
  15.499 -        GNTTABOP_unmap_grant_ref, unmap, op)))
  15.500 -        BUG();
  15.501 +		BLKTAP_INVALIDATE_HANDLE(handle);
  15.502 +	}
  15.503  
  15.504 -    if (blktap_vma != NULL)
  15.505 -        zap_page_range(blktap_vma, 
  15.506 -                       MMAP_VADDR(user_vstart, idx, 0), 
  15.507 -                       nr_pages << PAGE_SHIFT, NULL);
  15.508 +	BUG_ON(HYPERVISOR_grant_table_op(
  15.509 +		GNTTABOP_unmap_grant_ref, unmap, op));
  15.510 +
  15.511 +	if (blktap_vma != NULL)
  15.512 +		zap_page_range(blktap_vma, 
  15.513 +			       MMAP_VADDR(user_vstart, idx, 0), 
  15.514 +			       nr_pages << PAGE_SHIFT, NULL);
  15.515  }
  15.516  
  15.517  /******************************************************************
  15.518 @@ -466,34 +458,38 @@ static spinlock_t blkio_schedule_list_lo
  15.519  
  15.520  static int __on_blkdev_list(blkif_t *blkif)
  15.521  {
  15.522 -    return blkif->blkdev_list.next != NULL;
  15.523 +	return blkif->blkdev_list.next != NULL;
  15.524  }
  15.525  
  15.526  static void remove_from_blkdev_list(blkif_t *blkif)
  15.527  {
  15.528 -    unsigned long flags;
  15.529 -    if ( !__on_blkdev_list(blkif) ) return;
  15.530 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
  15.531 -    if ( __on_blkdev_list(blkif) )
  15.532 -    {
  15.533 -        list_del(&blkif->blkdev_list);
  15.534 -        blkif->blkdev_list.next = NULL;
  15.535 -        blkif_put(blkif);
  15.536 -    }
  15.537 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
  15.538 +	unsigned long flags;
  15.539 +
  15.540 +	if (!__on_blkdev_list(blkif))
  15.541 +		return;
  15.542 +
  15.543 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
  15.544 +	if (__on_blkdev_list(blkif)) {
  15.545 +		list_del(&blkif->blkdev_list);
  15.546 +		blkif->blkdev_list.next = NULL;
  15.547 +		blkif_put(blkif);
  15.548 +	}
  15.549 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
  15.550  }
  15.551  
  15.552  static void add_to_blkdev_list_tail(blkif_t *blkif)
  15.553  {
  15.554 -    unsigned long flags;
  15.555 -    if ( __on_blkdev_list(blkif) ) return;
  15.556 -    spin_lock_irqsave(&blkio_schedule_list_lock, flags);
  15.557 -    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
  15.558 -    {
  15.559 -        list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
  15.560 -        blkif_get(blkif);
  15.561 -    }
  15.562 -    spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
  15.563 +	unsigned long flags;
  15.564 +
  15.565 +	if (__on_blkdev_list(blkif))
  15.566 +		return;
  15.567 +
  15.568 +	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
  15.569 +	if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
  15.570 +		list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
  15.571 +		blkif_get(blkif);
  15.572 +	}
  15.573 +	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
  15.574  }
  15.575  
  15.576  
  15.577 @@ -505,51 +501,50 @@ static DECLARE_WAIT_QUEUE_HEAD(blkio_sch
  15.578  
  15.579  static int blkio_schedule(void *arg)
  15.580  {
  15.581 -    DECLARE_WAITQUEUE(wq, current);
  15.582 -
  15.583 -    blkif_t          *blkif;
  15.584 -    struct list_head *ent;
  15.585 -
  15.586 -    daemonize("xenblkd");
  15.587 +	DECLARE_WAITQUEUE(wq, current);
  15.588  
  15.589 -    for ( ; ; )
  15.590 -    {
  15.591 -        /* Wait for work to do. */
  15.592 -        add_wait_queue(&blkio_schedule_wait, &wq);
  15.593 -        set_current_state(TASK_INTERRUPTIBLE);
  15.594 -        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
  15.595 -             list_empty(&blkio_schedule_list) )
  15.596 -            schedule();
  15.597 -        __set_current_state(TASK_RUNNING);
  15.598 -        remove_wait_queue(&blkio_schedule_wait, &wq);
  15.599 +	blkif_t          *blkif;
  15.600 +	struct list_head *ent;
  15.601  
  15.602 -        /* Queue up a batch of requests. */
  15.603 -        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
  15.604 -                !list_empty(&blkio_schedule_list) )
  15.605 -        {
  15.606 -            ent = blkio_schedule_list.next;
  15.607 -            blkif = list_entry(ent, blkif_t, blkdev_list);
  15.608 -            blkif_get(blkif);
  15.609 -            remove_from_blkdev_list(blkif);
  15.610 -            if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
  15.611 -                add_to_blkdev_list_tail(blkif);
  15.612 -            blkif_put(blkif);
  15.613 -        }
  15.614 -    }
  15.615 +	daemonize("xenblkd");
  15.616 +
  15.617 +	for (;;) {
  15.618 +		/* Wait for work to do. */
  15.619 +		add_wait_queue(&blkio_schedule_wait, &wq);
  15.620 +		set_current_state(TASK_INTERRUPTIBLE);
  15.621 +		if ((NR_PENDING_REQS == MAX_PENDING_REQS) || 
  15.622 +		    list_empty(&blkio_schedule_list))
  15.623 +			schedule();
  15.624 +		__set_current_state(TASK_RUNNING);
  15.625 +		remove_wait_queue(&blkio_schedule_wait, &wq);
  15.626 +
  15.627 +		/* Queue up a batch of requests. */
  15.628 +		while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
  15.629 +		       !list_empty(&blkio_schedule_list)) {
  15.630 +			ent = blkio_schedule_list.next;
  15.631 +			blkif = list_entry(ent, blkif_t, blkdev_list);
  15.632 +			blkif_get(blkif);
  15.633 +			remove_from_blkdev_list(blkif);
  15.634 +			if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
  15.635 +				add_to_blkdev_list_tail(blkif);
  15.636 +			blkif_put(blkif);
  15.637 +		}
  15.638 +	}
  15.639  }
  15.640  
  15.641  static void maybe_trigger_blkio_schedule(void)
  15.642  {
  15.643 -    /*
  15.644 -     * Needed so that two processes, who together make the following predicate
  15.645 -     * true, don't both read stale values and evaluate the predicate
  15.646 -     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
  15.647 -     */
  15.648 -    smp_mb();
  15.649 +	/*
  15.650 +	 * Needed so that two processes that together make the following
  15.651 +	 * predicate true, don't both read stale values and evaluate the
  15.652 +	 * predicate incorrectly. Incredibly unlikely to stall the scheduler
  15.653 +	 * on the x86, but...
  15.654 +	 */
  15.655 +	smp_mb();
  15.656  
  15.657 -    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
  15.658 -         !list_empty(&blkio_schedule_list) )
  15.659 -        wake_up(&blkio_schedule_wait);
  15.660 +	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
  15.661 +	    !list_empty(&blkio_schedule_list))
  15.662 +		wake_up(&blkio_schedule_wait);
  15.663  }
  15.664  
  15.665  
  15.666 @@ -561,54 +556,53 @@ static void maybe_trigger_blkio_schedule
  15.667  
  15.668  static int blktap_read_ufe_ring(void)
  15.669  {
  15.670 -    /* This is called to read responses from the UFE ring. */
  15.671 -
  15.672 -    RING_IDX i, j, rp;
  15.673 -    blkif_response_t *resp;
  15.674 -    blkif_t *blkif;
  15.675 -    int pending_idx;
  15.676 -    pending_req_t *pending_req;
  15.677 -    unsigned long     flags;
  15.678 +	/* This is called to read responses from the UFE ring. */
  15.679  
  15.680 -    /* if we are forwarding from UFERring to FERing */
  15.681 -    if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
  15.682 +	RING_IDX i, j, rp;
  15.683 +	blkif_response_t *resp;
  15.684 +	blkif_t *blkif;
  15.685 +	int pending_idx;
  15.686 +	pending_req_t *pending_req;
  15.687 +	unsigned long     flags;
  15.688  
  15.689 -        /* for each outstanding message on the UFEring  */
  15.690 -        rp = blktap_ufe_ring.sring->rsp_prod;
  15.691 -        rmb();
  15.692 +	/* if we are forwarding from UFERring to FERing */
  15.693 +	if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
  15.694 +
  15.695 +		/* for each outstanding message on the UFEring  */
  15.696 +		rp = blktap_ufe_ring.sring->rsp_prod;
  15.697 +		rmb();
  15.698          
  15.699 -        for ( i = blktap_ufe_ring.rsp_cons; i != rp; i++ )
  15.700 -        {
  15.701 -            resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
  15.702 -            pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
  15.703 -            pending_req = &pending_reqs[pending_idx];
  15.704 +		for (i = blktap_ufe_ring.rsp_cons; i != rp; i++) {
  15.705 +			resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
  15.706 +			pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
  15.707 +			pending_req = &pending_reqs[pending_idx];
  15.708              
  15.709 -            blkif = pending_req->blkif;
  15.710 -            for (j = 0; j < pending_req->nr_pages; j++) {
  15.711 -                unsigned long vaddr;
  15.712 -                struct page **map = blktap_vma->vm_private_data;
  15.713 -                int offset; 
  15.714 -
  15.715 -                vaddr  = MMAP_VADDR(user_vstart, pending_idx, j);
  15.716 -                offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
  15.717 +			blkif = pending_req->blkif;
  15.718 +			for (j = 0; j < pending_req->nr_pages; j++) {
  15.719 +				unsigned long vaddr;
  15.720 +				struct page **map = blktap_vma->vm_private_data;
  15.721 +				int offset; 
  15.722  
  15.723 -                //ClearPageReserved(virt_to_page(vaddr));
  15.724 -                ClearPageReserved((struct page *)map[offset]);
  15.725 -                map[offset] = NULL;
  15.726 -            }
  15.727 +				vaddr  = MMAP_VADDR(user_vstart, pending_idx, j);
  15.728 +				offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
  15.729  
  15.730 -            fast_flush_area(pending_idx, pending_req->nr_pages);
  15.731 -            make_response(blkif, pending_req->id, resp->operation, 
  15.732 -                          resp->status);
  15.733 -            blkif_put(pending_req->blkif);
  15.734 -            spin_lock_irqsave(&pend_prod_lock, flags);
  15.735 -            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
  15.736 -            spin_unlock_irqrestore(&pend_prod_lock, flags);
  15.737 -        }
  15.738 -        blktap_ufe_ring.rsp_cons = i;
  15.739 -        maybe_trigger_blkio_schedule();
  15.740 -    }
  15.741 -    return 0;
  15.742 +				//ClearPageReserved(virt_to_page(vaddr));
  15.743 +				ClearPageReserved((struct page *)map[offset]);
  15.744 +				map[offset] = NULL;
  15.745 +			}
  15.746 +
  15.747 +			fast_flush_area(pending_idx, pending_req->nr_pages);
  15.748 +			make_response(blkif, pending_req->id, resp->operation, 
  15.749 +				      resp->status);
  15.750 +			blkif_put(pending_req->blkif);
  15.751 +			spin_lock_irqsave(&pend_prod_lock, flags);
  15.752 +			pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
  15.753 +			spin_unlock_irqrestore(&pend_prod_lock, flags);
  15.754 +		}
  15.755 +		blktap_ufe_ring.rsp_cons = i;
  15.756 +		maybe_trigger_blkio_schedule();
  15.757 +	}
  15.758 +	return 0;
  15.759  }
  15.760  
  15.761  
  15.762 @@ -618,10 +612,10 @@ static int blktap_read_ufe_ring(void)
  15.763  
  15.764  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
  15.765  {
  15.766 -    blkif_t *blkif = dev_id;
  15.767 -    add_to_blkdev_list_tail(blkif);
  15.768 -    maybe_trigger_blkio_schedule();
  15.769 -    return IRQ_HANDLED;
  15.770 +	blkif_t *blkif = dev_id;
  15.771 +	add_to_blkdev_list_tail(blkif);
  15.772 +	maybe_trigger_blkio_schedule();
  15.773 +	return IRQ_HANDLED;
  15.774  }
  15.775  
  15.776  
  15.777 @@ -632,199 +626,194 @@ irqreturn_t blkif_be_int(int irq, void *
  15.778  
  15.779  static int do_block_io_op(blkif_t *blkif, int max_to_do)
  15.780  {
  15.781 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
  15.782 -    blkif_request_t *req;
  15.783 -    RING_IDX i, rp;
  15.784 -    int more_to_do = 0;
  15.785 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
  15.786 +	blkif_request_t *req;
  15.787 +	RING_IDX i, rp;
  15.788 +	int more_to_do = 0;
  15.789      
  15.790 -    rp = blk_ring->sring->req_prod;
  15.791 -    rmb(); /* Ensure we see queued requests up to 'rp'. */
  15.792 +	rp = blk_ring->sring->req_prod;
  15.793 +	rmb(); /* Ensure we see queued requests up to 'rp'. */
  15.794  
  15.795 -    for ( i = blk_ring->req_cons; 
  15.796 -         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
  15.797 -          i++ )
  15.798 -    {
  15.799 -        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
  15.800 -        {
  15.801 -            more_to_do = 1;
  15.802 -            break;
  15.803 -        }
  15.804 +	for (i = blk_ring->req_cons; 
  15.805 +	     (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
  15.806 +	     i++) {
  15.807 +		if ((max_to_do-- == 0) ||
  15.808 +		    (NR_PENDING_REQS == MAX_PENDING_REQS)) {
  15.809 +			more_to_do = 1;
  15.810 +			break;
  15.811 +		}
  15.812          
  15.813 -        req = RING_GET_REQUEST(blk_ring, i);
  15.814 -        switch ( req->operation )
  15.815 -        {
  15.816 -        case BLKIF_OP_READ:
  15.817 -        case BLKIF_OP_WRITE:
  15.818 -            dispatch_rw_block_io(blkif, req);
  15.819 -            break;
  15.820 +		req = RING_GET_REQUEST(blk_ring, i);
  15.821 +		switch (req->operation) {
  15.822 +		case BLKIF_OP_READ:
  15.823 +		case BLKIF_OP_WRITE:
  15.824 +			dispatch_rw_block_io(blkif, req);
  15.825 +			break;
  15.826  
  15.827 -        default:
  15.828 -            DPRINTK("error: unknown block io operation [%d]\n",
  15.829 -                    req->operation);
  15.830 -            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
  15.831 -            break;
  15.832 -        }
  15.833 -    }
  15.834 +		default:
  15.835 +			DPRINTK("error: unknown block io operation [%d]\n",
  15.836 +				req->operation);
  15.837 +			make_response(blkif, req->id, req->operation,
  15.838 +				      BLKIF_RSP_ERROR);
  15.839 +			break;
  15.840 +		}
  15.841 +	}
  15.842  
  15.843 -    blk_ring->req_cons = i;
  15.844 -    blktap_kick_user();
  15.845 +	blk_ring->req_cons = i;
  15.846 +	blktap_kick_user();
  15.847  
  15.848 -    return more_to_do;
  15.849 +	return more_to_do;
  15.850  }
  15.851  
  15.852  static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
  15.853  {
  15.854 -    blkif_request_t *target;
  15.855 -    int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
  15.856 -    pending_req_t *pending_req;
  15.857 -    struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
  15.858 -    int op, ret;
  15.859 -    unsigned int nseg;
  15.860 +	blkif_request_t *target;
  15.861 +	int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
  15.862 +	pending_req_t *pending_req;
  15.863 +	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
  15.864 +	int op, ret;
  15.865 +	unsigned int nseg;
  15.866  
  15.867 -    /* Check that number of segments is sane. */
  15.868 -    nseg = req->nr_segments;
  15.869 -    if ( unlikely(nseg == 0) || 
  15.870 -         unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
  15.871 -    {
  15.872 -        DPRINTK("Bad number of segments in request (%d)\n", nseg);
  15.873 -        goto bad_descriptor;
  15.874 -    }
  15.875 +	/* Check that number of segments is sane. */
  15.876 +	nseg = req->nr_segments;
  15.877 +	if (unlikely(nseg == 0) || 
  15.878 +	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
  15.879 +		DPRINTK("Bad number of segments in request (%d)\n", nseg);
  15.880 +		goto bad_descriptor;
  15.881 +	}
  15.882  
  15.883 -    /* Make sure userspace is ready. */
  15.884 -    if (!blktap_ring_ok) {
  15.885 -        DPRINTK("blktap: ring not ready for requests!\n");
  15.886 -        goto bad_descriptor;
  15.887 -    }
  15.888 +	/* Make sure userspace is ready. */
  15.889 +	if (!blktap_ring_ok) {
  15.890 +		DPRINTK("blktap: ring not ready for requests!\n");
  15.891 +		goto bad_descriptor;
  15.892 +	}
  15.893      
  15.894  
  15.895 -    if ( RING_FULL(&blktap_ufe_ring) ) {
  15.896 -        WPRINTK("blktap: fe_ring is full, can't add (very broken!).\n");
  15.897 -        goto bad_descriptor;
  15.898 -    }
  15.899 -
  15.900 -    flush_cache_all(); /* a noop on intel... */
  15.901 -
  15.902 -    /* Map the foreign pages directly in to the application */    
  15.903 -    op = 0;
  15.904 -    for (i=0; i<req->nr_segments; i++) {
  15.905 -
  15.906 -        unsigned long uvaddr;
  15.907 -        unsigned long kvaddr;
  15.908 -        unsigned long ptep;
  15.909 -
  15.910 -        uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
  15.911 -        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
  15.912 -
  15.913 -        /* Map the remote page to kernel. */
  15.914 -        map[op].host_addr = kvaddr;
  15.915 -        map[op].dom   = blkif->domid;
  15.916 -        map[op].ref   = blkif_gref_from_fas(req->frame_and_sects[i]);
  15.917 -        map[op].flags = GNTMAP_host_map;
  15.918 -        /* This needs a bit more thought in terms of interposition: 
  15.919 -         * If we want to be able to modify pages during write using 
  15.920 -         * grant table mappings, the guest will either need to allow 
  15.921 -         * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
  15.922 -        if (req->operation == BLKIF_OP_WRITE)
  15.923 -            map[op].flags |= GNTMAP_readonly;
  15.924 -        op++;
  15.925 -
  15.926 -        /* Now map it to user. */
  15.927 -        ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
  15.928 -        if (ret)
  15.929 -        {
  15.930 -            DPRINTK("Couldn't get a pte addr!\n");
  15.931 -            fast_flush_area(pending_idx, req->nr_segments);
  15.932 -            goto bad_descriptor;
  15.933 -        }
  15.934 -
  15.935 -        map[op].host_addr = ptep;
  15.936 -        map[op].dom       = blkif->domid;
  15.937 -        map[op].ref       = blkif_gref_from_fas(req->frame_and_sects[i]);
  15.938 -        map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
  15.939 -                            | GNTMAP_contains_pte;
  15.940 -        /* Above interposition comment applies here as well. */
  15.941 -        if (req->operation == BLKIF_OP_WRITE)
  15.942 -            map[op].flags |= GNTMAP_readonly;
  15.943 -        op++;
  15.944 -    }
  15.945 -
  15.946 -    if ( unlikely(HYPERVISOR_grant_table_op(
  15.947 -            GNTTABOP_map_grant_ref, map, op)))
  15.948 -        BUG();
  15.949 -
  15.950 -    op = 0;
  15.951 -    for (i=0; i<(req->nr_segments*2); i+=2) {
  15.952 -        unsigned long uvaddr;
  15.953 -        unsigned long kvaddr;
  15.954 -        unsigned long offset;
  15.955 -        int cancel = 0;
  15.956 +	if (RING_FULL(&blktap_ufe_ring)) {
  15.957 +		WPRINTK("blktap: fe_ring is full, can't add "
  15.958 +			"(very broken!).\n");
  15.959 +		goto bad_descriptor;
  15.960 +	}
  15.961  
  15.962 -        uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
  15.963 -        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
  15.964 -
  15.965 -        if ( unlikely(map[i].handle < 0) ) 
  15.966 -        {
  15.967 -            DPRINTK("Error on kernel grant mapping (%d)\n", map[i].handle);
  15.968 -            ret = map[i].handle;
  15.969 -            cancel = 1;
  15.970 -        }
  15.971 -
  15.972 -        if ( unlikely(map[i+1].handle < 0) ) 
  15.973 -        {
  15.974 -            DPRINTK("Error on user grant mapping (%d)\n", map[i+1].handle);
  15.975 -            ret = map[i+1].handle;
  15.976 -            cancel = 1;
  15.977 -        }
  15.978 -
  15.979 -        if (cancel) 
  15.980 -        {
  15.981 -            fast_flush_area(pending_idx, req->nr_segments);
  15.982 -            goto bad_descriptor;
  15.983 -        }
  15.984 -
  15.985 -        /* Set the necessary mappings in p2m and in the VM_FOREIGN 
  15.986 -         * vm_area_struct to allow user vaddr -> struct page lookups
  15.987 -         * to work.  This is needed for direct IO to foreign pages. */
  15.988 -        phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
  15.989 -            FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
  15.990 +	flush_cache_all(); /* a noop on intel... */
  15.991  
  15.992 -        offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
  15.993 -        ((struct page **)blktap_vma->vm_private_data)[offset] =
  15.994 -            pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
  15.995 -
  15.996 -        /* Save handles for unmapping later. */
  15.997 -        pending_handle(pending_idx, i/2).kernel = map[i].handle;
  15.998 -        pending_handle(pending_idx, i/2).user   = map[i+1].handle;
  15.999 -    }
 15.1000 -
 15.1001 -    /* Mark mapped pages as reserved: */
 15.1002 -    for ( i = 0; i < req->nr_segments; i++ )
 15.1003 -    {
 15.1004 -        unsigned long kvaddr;
 15.1005 +	/* Map the foreign pages directly in to the application */    
 15.1006 +	op = 0;
 15.1007 +	for (i = 0; i < req->nr_segments; i++) {
 15.1008  
 15.1009 -        kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
 15.1010 -        SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
 15.1011 -    }
 15.1012 +		unsigned long uvaddr;
 15.1013 +		unsigned long kvaddr;
 15.1014 +		unsigned long ptep;
 15.1015  
 15.1016 -    pending_req = &pending_reqs[pending_idx];
 15.1017 -    pending_req->blkif     = blkif;
 15.1018 -    pending_req->id        = req->id;
 15.1019 -    pending_req->operation = req->operation;
 15.1020 -    pending_req->status    = BLKIF_RSP_OKAY;
 15.1021 -    pending_req->nr_pages  = nseg;
 15.1022 -    req->id = MAKE_ID(blkif->domid, pending_idx);
 15.1023 -    //atomic_set(&pending_req->pendcnt, nbio);
 15.1024 -    pending_cons++;
 15.1025 -    blkif_get(blkif);
 15.1026 +		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
 15.1027 +		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
 15.1028  
 15.1029 -    /* Finally, write the request message to the user ring. */
 15.1030 -    target = RING_GET_REQUEST(&blktap_ufe_ring, blktap_ufe_ring.req_prod_pvt);
 15.1031 -    memcpy(target, req, sizeof(*req));
 15.1032 -    blktap_ufe_ring.req_prod_pvt++;
 15.1033 -    return;
 15.1034 +		/* Map the remote page to kernel. */
 15.1035 +		map[op].host_addr = kvaddr;
 15.1036 +		map[op].dom   = blkif->domid;
 15.1037 +		map[op].ref   = blkif_gref_from_fas(req->frame_and_sects[i]);
 15.1038 +		map[op].flags = GNTMAP_host_map;
 15.1039 +		/* This needs a bit more thought in terms of interposition: 
 15.1040 +		 * If we want to be able to modify pages during write using 
 15.1041 +		 * grant table mappings, the guest will either need to allow 
 15.1042 +		 * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
 15.1043 +		if (req->operation == BLKIF_OP_WRITE)
 15.1044 +			map[op].flags |= GNTMAP_readonly;
 15.1045 +		op++;
 15.1046 +
 15.1047 +		/* Now map it to user. */
 15.1048 +		ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
 15.1049 +		if (ret) {
 15.1050 +			DPRINTK("Couldn't get a pte addr!\n");
 15.1051 +			fast_flush_area(pending_idx, req->nr_segments);
 15.1052 +			goto bad_descriptor;
 15.1053 +		}
 15.1054 +
 15.1055 +		map[op].host_addr = ptep;
 15.1056 +		map[op].dom       = blkif->domid;
 15.1057 +		map[op].ref       = blkif_gref_from_fas(req->frame_and_sects[i]);
 15.1058 +		map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
 15.1059 +			| GNTMAP_contains_pte;
 15.1060 +		/* Above interposition comment applies here as well. */
 15.1061 +		if (req->operation == BLKIF_OP_WRITE)
 15.1062 +			map[op].flags |= GNTMAP_readonly;
 15.1063 +		op++;
 15.1064 +	}
 15.1065 +
 15.1066 +	BUG_ON(HYPERVISOR_grant_table_op(
 15.1067 +		GNTTABOP_map_grant_ref, map, op));
 15.1068 +
 15.1069 +	op = 0;
 15.1070 +	for (i = 0; i < (req->nr_segments*2); i += 2) {
 15.1071 +		unsigned long uvaddr;
 15.1072 +		unsigned long kvaddr;
 15.1073 +		unsigned long offset;
 15.1074 +		int cancel = 0;
 15.1075 +
 15.1076 +		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
 15.1077 +		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
 15.1078 +
 15.1079 +		if (unlikely(map[i].handle < 0)) {
 15.1080 +			DPRINTK("Error on kernel grant mapping (%d)\n",
 15.1081 +				map[i].handle);
 15.1082 +			ret = map[i].handle;
 15.1083 +			cancel = 1;
 15.1084 +		}
 15.1085 +
 15.1086 +		if (unlikely(map[i+1].handle < 0)) {
 15.1087 +			DPRINTK("Error on user grant mapping (%d)\n",
 15.1088 +				map[i+1].handle);
 15.1089 +			ret = map[i+1].handle;
 15.1090 +			cancel = 1;
 15.1091 +		}
 15.1092 +
 15.1093 +		if (cancel) {
 15.1094 +			fast_flush_area(pending_idx, req->nr_segments);
 15.1095 +			goto bad_descriptor;
 15.1096 +		}
 15.1097 +
 15.1098 +		/* Set the necessary mappings in p2m and in the VM_FOREIGN 
 15.1099 +		 * vm_area_struct to allow user vaddr -> struct page lookups
 15.1100 +		 * to work.  This is needed for direct IO to foreign pages. */
 15.1101 +		phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
 15.1102 +			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
 15.1103 +
 15.1104 +		offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
 15.1105 +		((struct page **)blktap_vma->vm_private_data)[offset] =
 15.1106 +			pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
 15.1107 +
 15.1108 +		/* Save handles for unmapping later. */
 15.1109 +		pending_handle(pending_idx, i/2).kernel = map[i].handle;
 15.1110 +		pending_handle(pending_idx, i/2).user   = map[i+1].handle;
 15.1111 +	}
 15.1112 +
 15.1113 +	/* Mark mapped pages as reserved: */
 15.1114 +	for (i = 0; i < req->nr_segments; i++) {
 15.1115 +		unsigned long kvaddr;
 15.1116 +		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
 15.1117 +		SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
 15.1118 +	}
 15.1119 +
 15.1120 +	pending_req = &pending_reqs[pending_idx];
 15.1121 +	pending_req->blkif     = blkif;
 15.1122 +	pending_req->id        = req->id;
 15.1123 +	pending_req->operation = req->operation;
 15.1124 +	pending_req->status    = BLKIF_RSP_OKAY;
 15.1125 +	pending_req->nr_pages  = nseg;
 15.1126 +	req->id = MAKE_ID(blkif->domid, pending_idx);
 15.1127 +	//atomic_set(&pending_req->pendcnt, nbio);
 15.1128 +	pending_cons++;
 15.1129 +	blkif_get(blkif);
 15.1130 +
 15.1131 +	/* Finally, write the request message to the user ring. */
 15.1132 +	target = RING_GET_REQUEST(&blktap_ufe_ring,
 15.1133 +				  blktap_ufe_ring.req_prod_pvt);
 15.1134 +	memcpy(target, req, sizeof(*req));
 15.1135 +	blktap_ufe_ring.req_prod_pvt++;
 15.1136 +	return;
 15.1137  
 15.1138   bad_descriptor:
 15.1139 -    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
 15.1140 +	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
 15.1141  } 
 15.1142  
 15.1143  
 15.1144 @@ -837,80 +826,89 @@ static void dispatch_rw_block_io(blkif_t
 15.1145  static void make_response(blkif_t *blkif, unsigned long id, 
 15.1146                            unsigned short op, int st)
 15.1147  {
 15.1148 -    blkif_response_t *resp;
 15.1149 -    unsigned long     flags;
 15.1150 -    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
 15.1151 +	blkif_response_t *resp;
 15.1152 +	unsigned long     flags;
 15.1153 +	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
 15.1154  
 15.1155 -    /* Place on the response ring for the relevant domain. */ 
 15.1156 -    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
 15.1157 -    resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
 15.1158 -    resp->id        = id;
 15.1159 -    resp->operation = op;
 15.1160 -    resp->status    = st;
 15.1161 -    wmb(); /* Ensure other side can see the response fields. */
 15.1162 -    blk_ring->rsp_prod_pvt++;
 15.1163 -    RING_PUSH_RESPONSES(blk_ring);
 15.1164 -    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
 15.1165 +	/* Place on the response ring for the relevant domain. */ 
 15.1166 +	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
 15.1167 +	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
 15.1168 +	resp->id        = id;
 15.1169 +	resp->operation = op;
 15.1170 +	resp->status    = st;
 15.1171 +	wmb(); /* Ensure other side can see the response fields. */
 15.1172 +	blk_ring->rsp_prod_pvt++;
 15.1173 +	RING_PUSH_RESPONSES(blk_ring);
 15.1174 +	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
 15.1175  
 15.1176 -    /* Kick the relevant domain. */
 15.1177 -    notify_via_evtchn(blkif->evtchn);
 15.1178 +	/* Kick the relevant domain. */
 15.1179 +	notify_via_evtchn(blkif->evtchn);
 15.1180  }
 15.1181  
 15.1182  static struct miscdevice blktap_miscdev = {
 15.1183 -    .minor        = BLKTAP_MINOR,
 15.1184 -    .name         = "blktap",
 15.1185 -    .fops         = &blktap_fops,
 15.1186 -    .devfs_name   = "misc/blktap",
 15.1187 +	.minor        = BLKTAP_MINOR,
 15.1188 +	.name         = "blktap",
 15.1189 +	.fops         = &blktap_fops,
 15.1190 +	.devfs_name   = "misc/blktap",
 15.1191  };
 15.1192  
 15.1193  void blkif_deschedule(blkif_t *blkif)
 15.1194  {
 15.1195 -    remove_from_blkdev_list(blkif);
 15.1196 +	remove_from_blkdev_list(blkif);
 15.1197  }
 15.1198  
 15.1199  static int __init blkif_init(void)
 15.1200  {
 15.1201 -    int i, j, err;
 15.1202 -    struct page *page;
 15.1203 +	int i, j, err;
 15.1204 +	struct page *page;
 15.1205  /*
 15.1206 -    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
 15.1207 -         !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
 15.1208 -        return 0;
 15.1209 +  if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
 15.1210 +  !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
 15.1211 +  return 0;
 15.1212  */
 15.1213 -    blkif_interface_init();
 15.1214 +	blkif_interface_init();
 15.1215  
 15.1216 -    page = balloon_alloc_empty_page_range(MMAP_PAGES);
 15.1217 -    BUG_ON(page == NULL);
 15.1218 -    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 15.1219 +	page = balloon_alloc_empty_page_range(MMAP_PAGES);
 15.1220 +	BUG_ON(page == NULL);
 15.1221 +	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 15.1222  
 15.1223 -    pending_cons = 0;
 15.1224 -    pending_prod = MAX_PENDING_REQS;
 15.1225 -    memset(pending_reqs, 0, sizeof(pending_reqs));
 15.1226 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
 15.1227 -        pending_ring[i] = i;
 15.1228 +	pending_cons = 0;
 15.1229 +	pending_prod = MAX_PENDING_REQS;
 15.1230 +	memset(pending_reqs, 0, sizeof(pending_reqs));
 15.1231 +	for (i = 0; i < MAX_PENDING_REQS; i++)
 15.1232 +		pending_ring[i] = i;
 15.1233      
 15.1234 -    spin_lock_init(&blkio_schedule_list_lock);
 15.1235 -    INIT_LIST_HEAD(&blkio_schedule_list);
 15.1236 -
 15.1237 -    if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
 15.1238 -        BUG();
 15.1239 -
 15.1240 -    blkif_xenbus_init();
 15.1241 +	spin_lock_init(&blkio_schedule_list_lock);
 15.1242 +	INIT_LIST_HEAD(&blkio_schedule_list);
 15.1243  
 15.1244 -    for (i=0; i<MAX_PENDING_REQS ; i++)
 15.1245 -        for (j=0; j<BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
 15.1246 -            BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
 15.1247 +	BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0);
 15.1248  
 15.1249 -    err = misc_register(&blktap_miscdev);
 15.1250 -    if ( err != 0 )
 15.1251 -    {
 15.1252 -        printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n", err);
 15.1253 -        return err;
 15.1254 -    }
 15.1255 +	blkif_xenbus_init();
 15.1256  
 15.1257 -    init_waitqueue_head(&blktap_wait);
 15.1258 +	for (i = 0; i < MAX_PENDING_REQS; i++)
 15.1259 +		for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
 15.1260 +			BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
 15.1261  
 15.1262 -    return 0;
 15.1263 +	err = misc_register(&blktap_miscdev);
 15.1264 +	if (err != 0) {
 15.1265 +		printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n",
 15.1266 +		       err);
 15.1267 +		return err;
 15.1268 +	}
 15.1269 +
 15.1270 +	init_waitqueue_head(&blktap_wait);
 15.1271 +
 15.1272 +	return 0;
 15.1273  }
 15.1274  
 15.1275  __initcall(blkif_init);
 15.1276 +
 15.1277 +/*
 15.1278 + * Local variables:
 15.1279 + *  c-file-style: "linux"
 15.1280 + *  indent-tabs-mode: t
 15.1281 + *  c-indent-level: 8
 15.1282 + *  c-basic-offset: 8
 15.1283 + *  tab-width: 8
 15.1284 + * End:
 15.1285 + */
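
The request IDs reindented above pack the frontend domain and the pending-ring slot into a single unsigned long so one value can round-trip through the user ring and come back to blktap_read_ufe_ring(). A standalone check of that encoding (typedefs narrowed for the demo; the driver masks the index with MASK_PEND_IDX, a plain 16-bit mask here):

#include <assert.h>
#include <stdio.h>

typedef unsigned short domid_t;
typedef unsigned int   PEND_RING_IDX;

/* Matches the driver: domain in the high bits, ring slot in the low 16. */
static unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
{
	return ((unsigned long)fe_dom << 16) | (idx & 0xffff);
}

static PEND_RING_IDX ID_TO_IDX(unsigned long id) { return id & 0x0000ffff; }
static domid_t       ID_TO_DOM(unsigned long id) { return id >> 16; }

int main(void)
{
	unsigned long id = MAKE_ID(7 /* domid */, 42 /* slot */);

	assert(ID_TO_DOM(id) == 7);
	assert(ID_TO_IDX(id) == 42);
	printf("id=%#lx dom=%u idx=%u\n", id, ID_TO_DOM(id), ID_TO_IDX(id));
	return 0;
}
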
    16.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/common.h	Thu Sep 22 16:05:44 2005 +0100
    16.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/common.h	Thu Sep 22 16:12:14 2005 +0100
    16.3 @@ -33,39 +33,39 @@
    16.4  #define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
    16.5  
    16.6  struct vbd {
    16.7 -    blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
    16.8 -    unsigned char  readonly;    /* Non-zero -> read-only */
    16.9 -    unsigned char  type;        /* VDISK_xxx */
   16.10 -    u32            pdevice;     /* phys device that this vbd maps to */
   16.11 -    struct block_device *bdev;
   16.12 +	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
   16.13 +	unsigned char  readonly;    /* Non-zero -> read-only */
   16.14 +	unsigned char  type;        /* VDISK_xxx */
   16.15 +	u32            pdevice;     /* phys device that this vbd maps to */
   16.16 +	struct block_device *bdev;
   16.17  }; 
   16.18  
   16.19  typedef struct blkif_st {
   16.20 -    /* Unique identifier for this interface. */
   16.21 -    domid_t           domid;
   16.22 -    unsigned int      handle;
   16.23 -    /* Physical parameters of the comms window. */
   16.24 -    unsigned int      evtchn;
   16.25 -    unsigned int      remote_evtchn;
   16.26 -    /* Comms information. */
   16.27 -    blkif_back_ring_t blk_ring;
   16.28 -    struct vm_struct *blk_ring_area;
   16.29 -    /* VBDs attached to this interface. */
   16.30 -    struct vbd        vbd;
   16.31 -    /* Private fields. */
   16.32 -    enum { DISCONNECTED, CONNECTED } status;
   16.33 +	/* Unique identifier for this interface. */
   16.34 +	domid_t           domid;
   16.35 +	unsigned int      handle;
   16.36 +	/* Physical parameters of the comms window. */
   16.37 +	unsigned int      evtchn;
   16.38 +	unsigned int      remote_evtchn;
   16.39 +	/* Comms information. */
   16.40 +	blkif_back_ring_t blk_ring;
   16.41 +	struct vm_struct *blk_ring_area;
   16.42 +	/* VBDs attached to this interface. */
   16.43 +	struct vbd        vbd;
   16.44 +	/* Private fields. */
   16.45 +	enum { DISCONNECTED, CONNECTED } status;
   16.46  #ifdef CONFIG_XEN_BLKDEV_TAP_BE
   16.47 -    /* Is this a blktap frontend */
   16.48 -    unsigned int     is_blktap;
   16.49 +	/* Is this a blktap frontend */
   16.50 +	unsigned int     is_blktap;
   16.51  #endif
   16.52 -    struct list_head blkdev_list;
   16.53 -    spinlock_t       blk_ring_lock;
   16.54 -    atomic_t         refcnt;
   16.55 +	struct list_head blkdev_list;
   16.56 +	spinlock_t       blk_ring_lock;
   16.57 +	atomic_t         refcnt;
   16.58  
   16.59 -    struct work_struct free_work;
   16.60 +	struct work_struct free_work;
   16.61  
   16.62 -    u16              shmem_handle;
   16.63 -    grant_ref_t      shmem_ref;
   16.64 +	u16              shmem_handle;
   16.65 +	grant_ref_t      shmem_ref;
   16.66  } blkif_t;
   16.67  
   16.68  blkif_t *alloc_blkif(domid_t domid);
   16.69 @@ -89,10 +89,10 @@ unsigned int vbd_info(struct vbd *vbd);
   16.70  unsigned long vbd_secsize(struct vbd *vbd);
   16.71  
   16.72  struct phys_req {
   16.73 -    unsigned short       dev;
   16.74 -    unsigned short       nr_sects;
   16.75 -    struct block_device *bdev;
   16.76 -    blkif_sector_t       sector_number;
   16.77 +	unsigned short       dev;
   16.78 +	unsigned short       nr_sects;
   16.79 +	struct block_device *bdev;
   16.80 +	blkif_sector_t       sector_number;
   16.81  };
   16.82  
   16.83  int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
   16.84 @@ -106,3 +106,13 @@ void blkif_xenbus_init(void);
   16.85  irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
   16.86  
   16.87  #endif /* __BLKIF__BACKEND__COMMON_H__ */
   16.88 +
   16.89 +/*
   16.90 + * Local variables:
   16.91 + *  c-file-style: "linux"
   16.92 + *  indent-tabs-mode: t
   16.93 + *  c-indent-level: 8
   16.94 + *  c-basic-offset: 8
   16.95 + *  tab-width: 8
   16.96 + * End:
   16.97 + */
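[Editor's note: the retabbed blkif_t above pairs an atomic refcnt with a free_work item; the usual idiom in this tree is a get/put macro pair in which the final put defers destruction to a workqueue. A minimal sketch of that idiom (macro names assumed, not shown in this hunk):

	#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
	#define blkif_put(_b)						\
		do {							\
			if (atomic_dec_and_test(&(_b)->refcnt))		\
				schedule_work(&(_b)->free_work);	\
		} while (0)
]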
    17.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c	Thu Sep 22 16:05:44 2005 +0100
    17.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c	Thu Sep 22 16:12:14 2005 +0100
    17.3 @@ -222,3 +222,13 @@ void blkif_xenbus_init(void)
    17.4  {
    17.5  	xenbus_register_backend(&blkback);
    17.6  }
    17.7 +
    17.8 +/*
    17.9 + * Local variables:
   17.10 + *  c-file-style: "linux"
   17.11 + *  indent-tabs-mode: t
   17.12 + *  c-indent-level: 8
   17.13 + *  c-basic-offset: 8
   17.14 + *  tab-width: 8
   17.15 + * End:
   17.16 + */
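[Editor's note: blkif_xenbus_init() above hands xenbus a driver object named blkback, defined earlier in this file outside the hunk. Roughly, it is a struct xenbus_driver of the following shape — a sketch only; the exact field set and callback names here are assumptions about this tree's xenbus API:

	static struct xenbus_driver blkback = {
		.name   = "vbd",		/* backend device type */
		.ids    = blkback_ids,		/* xenbus match table */
		.probe  = blkback_probe,	/* a new device appears */
		.remove = blkback_remove,	/* the device goes away */
	};
]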
    18.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/console.c	Thu Sep 22 16:05:44 2005 +0100
    18.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c	Thu Sep 22 16:12:14 2005 +0100
    18.3 @@ -75,31 +75,33 @@ extern int sysrq_enabled;
    18.4  
    18.5  static int __init xencons_setup(char *str)
    18.6  {
    18.7 -    char *q;
    18.8 -    int n;
    18.9 -
   18.10 -    if ( !strncmp(str, "ttyS", 4) )
   18.11 -        xc_mode = XC_SERIAL;
   18.12 -    else if ( !strncmp(str, "tty", 3) )
   18.13 -        xc_mode = XC_TTY;
   18.14 -    else if ( !strncmp(str, "off", 3) )
   18.15 -        xc_mode = XC_OFF;
   18.16 +	char *q;
   18.17 +	int n;
   18.18  
   18.19 -    switch ( xc_mode )
   18.20 -    {
   18.21 -    case XC_SERIAL:
   18.22 -        n = simple_strtol( str+4, &q, 10 );
   18.23 -        if ( q > (str + 4) ) xc_num = n;
   18.24 -        break;
   18.25 -    case XC_TTY:
   18.26 -        n = simple_strtol( str+3, &q, 10 );
   18.27 -        if ( q > (str + 3) ) xc_num = n;
   18.28 -        break;
   18.29 -    default:
   18.30 -        break;
   18.31 -    }
   18.32 +	if (!strncmp(str, "ttyS", 4))
   18.33 +		xc_mode = XC_SERIAL;
   18.34 +	else if (!strncmp(str, "tty", 3))
   18.35 +		xc_mode = XC_TTY;
   18.36 +	else if (!strncmp(str, "off", 3))
   18.37 +		xc_mode = XC_OFF;
   18.38  
   18.39 -    return 1;
   18.40 +	switch ( xc_mode )
   18.41 +	{
   18.42 +	case XC_SERIAL:
   18.43 +		n = simple_strtol(str+4, &q, 10);
   18.44 +		if (q > (str + 4))
   18.45 +			xc_num = n;
   18.46 +		break;
   18.47 +	case XC_TTY:
   18.48 +		n = simple_strtol(str+3, &q, 10);
   18.49 +		if (q > (str + 3))
   18.50 +			xc_num = n;
   18.51 +		break;
   18.52 +	default:
   18.53 +		break;
   18.54 +	}
   18.55 +
   18.56 +	return 1;
   18.57  }
   18.58  __setup("xencons=", xencons_setup);
   18.59  
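[Editor's note: the reformatted xencons_setup() above parses the console selection from the kernel command line. For reference, the boot-parameter forms it accepts (the numeric values are illustrative):

	xencons=ttyS0	# serial-style device: xc_mode = XC_SERIAL, xc_num = 0
	xencons=tty1	# VT-style device:     xc_mode = XC_TTY,    xc_num = 1
	xencons=off	# disable the Xen virtual console entirely
]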
   18.60 @@ -111,11 +113,11 @@ static unsigned int wc, wp; /* write_con
   18.61  
   18.62  static int __init xencons_bufsz_setup(char *str)
   18.63  {
   18.64 -    unsigned int goal;
   18.65 -    goal = simple_strtoul(str, NULL, 0);
   18.66 -    while ( wbuf_size < goal )
   18.67 -        wbuf_size <<= 1;
   18.68 -    return 1;
   18.69 +	unsigned int goal;
   18.70 +	goal = simple_strtoul(str, NULL, 0);
   18.71 +	while (wbuf_size < goal)
   18.72 +		wbuf_size <<= 1;
   18.73 +	return 1;
   18.74  }
   18.75  __setup("xencons_bufsz=", xencons_bufsz_setup);
   18.76  
   18.77 @@ -135,57 +137,55 @@ static struct tty_driver xencons_driver;
   18.78  /******************** Kernel console driver ********************************/
   18.79  
   18.80  static void kcons_write(
   18.81 -    struct console *c, const char *s, unsigned int count)
   18.82 +	struct console *c, const char *s, unsigned int count)
   18.83  {
   18.84 -    int           i;
   18.85 -    unsigned long flags;
   18.86 +	int           i;
   18.87 +	unsigned long flags;
   18.88  
   18.89 -    spin_lock_irqsave(&xencons_lock, flags);
   18.90 +	spin_lock_irqsave(&xencons_lock, flags);
   18.91      
   18.92 -    for ( i = 0; i < count; i++ )
   18.93 -    {
   18.94 -        if ( (wp - wc) >= (wbuf_size - 1) )
   18.95 -            break;
   18.96 -        if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' )
   18.97 -            wbuf[WBUF_MASK(wp++)] = '\r';
   18.98 -    }
   18.99 +	for (i = 0; i < count; i++) {
  18.100 +		if ((wp - wc) >= (wbuf_size - 1))
  18.101 +			break;
  18.102 +		if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
  18.103 +			wbuf[WBUF_MASK(wp++)] = '\r';
  18.104 +	}
  18.105  
  18.106 -    __xencons_tx_flush();
  18.107 +	__xencons_tx_flush();
  18.108  
  18.109 -    spin_unlock_irqrestore(&xencons_lock, flags);
  18.110 +	spin_unlock_irqrestore(&xencons_lock, flags);
  18.111  }
  18.112  
  18.113  static void kcons_write_dom0(
  18.114 -    struct console *c, const char *s, unsigned int count)
  18.115 +	struct console *c, const char *s, unsigned int count)
  18.116  {
  18.117 -    int rc;
  18.118 +	int rc;
  18.119  
  18.120 -    while ( (count > 0) &&
  18.121 -            ((rc = HYPERVISOR_console_io(
  18.122 -                CONSOLEIO_write, count, (char *)s)) > 0) )
  18.123 -    {
  18.124 -        count -= rc;
  18.125 -        s += rc;
  18.126 -    }
  18.127 +	while ((count > 0) &&
  18.128 +	       ((rc = HYPERVISOR_console_io(
  18.129 +			CONSOLEIO_write, count, (char *)s)) > 0)) {
  18.130 +		count -= rc;
  18.131 +		s += rc;
  18.132 +	}
  18.133  }
  18.134  
  18.135  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  18.136  static struct tty_driver *kcons_device(struct console *c, int *index)
  18.137  {
  18.138 -    *index = c->index;
  18.139 -    return xencons_driver;
  18.140 +	*index = c->index;
  18.141 +	return xencons_driver;
  18.142  }
  18.143  #else
  18.144  static kdev_t kcons_device(struct console *c)
  18.145  {
  18.146 -    return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
  18.147 +	return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1);
  18.148  }
  18.149  #endif
  18.150  
  18.151  static struct console kcons_info = {
  18.152 -    .device	= kcons_device,
  18.153 -    .flags	= CON_PRINTBUFFER,
  18.154 -    .index	= -1,
  18.155 +	.device	= kcons_device,
  18.156 +	.flags	= CON_PRINTBUFFER,
  18.157 +	.index	= -1,
  18.158  };
  18.159  
  18.160  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  18.161 @@ -196,44 +196,42 @@ static int __init xen_console_init(void)
  18.162  void xen_console_init(void)
  18.163  #endif
  18.164  {
  18.165 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
  18.166 -    {
  18.167 -        if ( xc_mode == XC_DEFAULT )
  18.168 -            xc_mode = XC_SERIAL;
  18.169 -        kcons_info.write = kcons_write_dom0;
  18.170 +	if (xen_start_info->flags & SIF_INITDOMAIN) {
  18.171 +		if (xc_mode == XC_DEFAULT)
  18.172 +			xc_mode = XC_SERIAL;
  18.173 +		kcons_info.write = kcons_write_dom0;
  18.174  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  18.175 -        if ( xc_mode == XC_SERIAL )
  18.176 -            kcons_info.flags |= CON_ENABLED;
  18.177 +		if (xc_mode == XC_SERIAL)
  18.178 +			kcons_info.flags |= CON_ENABLED;
  18.179  #endif
  18.180 -    }
  18.181 -    else
  18.182 -    {
  18.183 -        if ( xc_mode == XC_DEFAULT )
  18.184 -            xc_mode = XC_TTY;
  18.185 -        kcons_info.write = kcons_write;
  18.186 -    }
  18.187 +	} else {
  18.188 +		if (xc_mode == XC_DEFAULT)
  18.189 +			xc_mode = XC_TTY;
  18.190 +		kcons_info.write = kcons_write;
  18.191 +	}
  18.192  
  18.193 -    switch ( xc_mode )
  18.194 -    {
  18.195 -    case XC_SERIAL:
  18.196 -        strcpy(kcons_info.name, "ttyS");
  18.197 -        if ( xc_num == -1 ) xc_num = 0;
  18.198 -        break;
  18.199 +	switch (xc_mode) {
  18.200 +	case XC_SERIAL:
  18.201 +		strcpy(kcons_info.name, "ttyS");
  18.202 +		if (xc_num == -1)
  18.203 +			xc_num = 0;
  18.204 +		break;
  18.205  
  18.206 -    case XC_TTY:
  18.207 -        strcpy(kcons_info.name, "tty");
  18.208 -        if ( xc_num == -1 ) xc_num = 1;
  18.209 -        break;
  18.210 +	case XC_TTY:
  18.211 +		strcpy(kcons_info.name, "tty");
  18.212 +		if (xc_num == -1)
  18.213 +			xc_num = 1;
  18.214 +		break;
  18.215  
  18.216 -    default:
  18.217 -        return __RETCODE;
  18.218 -    }
  18.219 +	default:
  18.220 +		return __RETCODE;
  18.221 +	}
  18.222  
  18.223 -    wbuf = alloc_bootmem(wbuf_size);
  18.224 +	wbuf = alloc_bootmem(wbuf_size);
  18.225  
  18.226 -    register_console(&kcons_info);
  18.227 +	register_console(&kcons_info);
  18.228  
  18.229 -    return __RETCODE;
  18.230 +	return __RETCODE;
  18.231  }
  18.232  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  18.233  console_initcall(xen_console_init);
  18.234 @@ -246,41 +244,40 @@ asmlinkage int xprintk(const char *fmt, 
  18.235  asmlinkage int xprintk(const char *fmt, ...)
  18.236  #endif
  18.237  {
  18.238 -    va_list args;
  18.239 -    int printk_len;
  18.240 -    static char printk_buf[1024];
  18.241 +	va_list args;
  18.242 +	int printk_len;
  18.243 +	static char printk_buf[1024];
  18.244      
  18.245 -    /* Emit the output into the temporary buffer */
  18.246 -    va_start(args, fmt);
  18.247 -    printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
  18.248 -    va_end(args);
  18.249 +	/* Emit the output into the temporary buffer */
  18.250 +	va_start(args, fmt);
  18.251 +	printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
  18.252 +	va_end(args);
  18.253  
  18.254 -    /* Send the processed output directly to Xen. */
  18.255 -    kcons_write_dom0(NULL, printk_buf, printk_len);
  18.256 +	/* Send the processed output directly to Xen. */
  18.257 +	kcons_write_dom0(NULL, printk_buf, printk_len);
  18.258  
  18.259 -    return 0;
  18.260 +	return 0;
  18.261  }
  18.262  
  18.263  /*** Forcibly flush console data before dying. ***/
  18.264  void xencons_force_flush(void)
  18.265  {
  18.266 -    int        sz;
  18.267 +	int sz;
  18.268  
  18.269 -    /* Emergency console is synchronous, so there's nothing to flush. */
  18.270 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
  18.271 -        return;
  18.272 +	/* Emergency console is synchronous, so there's nothing to flush. */
  18.273 +	if (xen_start_info->flags & SIF_INITDOMAIN)
  18.274 +		return;
  18.275  
  18.276  
  18.277 -    /* Spin until console data is flushed through to the domain controller. */
  18.278 -    while ( (wc != wp) )
  18.279 -    {
  18.280 -	int sent = 0;
  18.281 -        if ( (sz = wp - wc) == 0 )
  18.282 -            continue;
  18.283 -	sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  18.284 -	if (sent > 0)
  18.285 -	    wc += sent;
  18.286 -    }
  18.287 +	/* Spin until console data is flushed through to the daemon. */
  18.288 +	while (wc != wp) {
  18.289 +		int sent = 0;
  18.290 +		if ((sz = wp - wc) == 0)
  18.291 +			continue;
  18.292 +		sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  18.293 +		if (sent > 0)
  18.294 +			wc += sent;
  18.295 +	}
  18.296  }
  18.297  
  18.298  
  18.299 @@ -305,362 +302,358 @@ static char x_char;
  18.300  /* Non-privileged receive callback. */
  18.301  static void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
  18.302  {
  18.303 -    int           i;
  18.304 -    unsigned long flags;
  18.305 +	int           i;
  18.306 +	unsigned long flags;
  18.307  
  18.308 -    spin_lock_irqsave(&xencons_lock, flags);
  18.309 -    if ( xencons_tty != NULL )
  18.310 -    {
  18.311 -        for ( i = 0; i < len; i++ ) {
  18.312 +	spin_lock_irqsave(&xencons_lock, flags);
  18.313 +	if (xencons_tty == NULL)
  18.314 +		goto out;
  18.315 +
  18.316 +	for (i = 0; i < len; i++) {
  18.317  #ifdef CONFIG_MAGIC_SYSRQ
  18.318 -            if (sysrq_enabled) {
  18.319 -                if (buf[i] == '\x0f') { /* ^O */
  18.320 -                    sysrq_requested = jiffies;
  18.321 -                    continue; /* don't print the sysrq key */
  18.322 -                } else if (sysrq_requested) {
  18.323 -                    unsigned long sysrq_timeout = sysrq_requested + HZ*2;
  18.324 -                    sysrq_requested = 0;
  18.325 -                    /* if it's been less than a timeout, do the sysrq */
  18.326 -                    if (time_before(jiffies, sysrq_timeout)) {
  18.327 -                        spin_unlock_irqrestore(&xencons_lock, flags);
  18.328 -                        handle_sysrq(buf[i], regs, xencons_tty);
  18.329 -                        spin_lock_irqsave(&xencons_lock, flags);
  18.330 -                        continue;
  18.331 -                    }
  18.332 -                }
  18.333 -            }
  18.334 +		if (sysrq_enabled) {
  18.335 +			if (buf[i] == '\x0f') { /* ^O */
  18.336 +				sysrq_requested = jiffies;
  18.337 +				continue; /* don't print the sysrq key */
  18.338 +			} else if (sysrq_requested) {
  18.339 +				unsigned long sysrq_timeout =
  18.340 +					sysrq_requested + HZ*2;
  18.341 +				sysrq_requested = 0;
  18.342 +				if (time_before(jiffies, sysrq_timeout)) {
  18.343 +					spin_unlock_irqrestore(
  18.344 +						&xencons_lock, flags);
  18.345 +					handle_sysrq(
  18.346 +						buf[i], regs, xencons_tty);
  18.347 +					spin_lock_irqsave(
  18.348 +						&xencons_lock, flags);
  18.349 +					continue;
  18.350 +				}
  18.351 +			}
  18.352 +		}
  18.353  #endif
  18.354 -            tty_insert_flip_char(xencons_tty, buf[i], 0);
  18.355 -        }
  18.356 -        tty_flip_buffer_push(xencons_tty);
  18.357 -    }
  18.358 -    spin_unlock_irqrestore(&xencons_lock, flags);
  18.359 +		tty_insert_flip_char(xencons_tty, buf[i], 0);
  18.360 +	}
  18.361 +	tty_flip_buffer_push(xencons_tty);
  18.362  
  18.363 + out:
  18.364 +	spin_unlock_irqrestore(&xencons_lock, flags);
  18.365  }
  18.366  
  18.367  /* Privileged and non-privileged transmit worker. */
  18.368  static void __xencons_tx_flush(void)
  18.369  {
  18.370 -    int        sz, work_done = 0;
  18.371 -
  18.372 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
  18.373 -    {
  18.374 -        if ( x_char )
  18.375 -        {
  18.376 -            kcons_write_dom0(NULL, &x_char, 1);
  18.377 -            x_char = 0;
  18.378 -            work_done = 1;
  18.379 -        }
  18.380 +	int sz, work_done = 0;
  18.381  
  18.382 -        while ( wc != wp )
  18.383 -        {
  18.384 -            sz = wp - wc;
  18.385 -            if ( sz > (wbuf_size - WBUF_MASK(wc)) )
  18.386 -                sz = wbuf_size - WBUF_MASK(wc);
  18.387 -            kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
  18.388 -            wc += sz;
  18.389 -            work_done = 1;
  18.390 -        }
  18.391 -    }
  18.392 -    else
  18.393 -    {
  18.394 -        while ( x_char )
  18.395 -        {
  18.396 -	    if (xencons_ring_send(&x_char, 1) == 1) {
  18.397 -		x_char = 0;
  18.398 -		work_done = 1;
  18.399 -	    }
  18.400 -        }
  18.401 +	if (xen_start_info->flags & SIF_INITDOMAIN) {
  18.402 +		if (x_char) {
  18.403 +			kcons_write_dom0(NULL, &x_char, 1);
  18.404 +			x_char = 0;
  18.405 +			work_done = 1;
  18.406 +		}
  18.407  
  18.408 -        while ( wc != wp )
  18.409 -        {
  18.410 -	    int sent;
  18.411 -            sz = wp - wc;
  18.412 -	    if ( sz > (wbuf_size - WBUF_MASK(wc)) )
  18.413 -		sz = wbuf_size - WBUF_MASK(wc);
  18.414 -	    sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  18.415 -	    if ( sent > 0 ) {
  18.416 -		wc += sent;
  18.417 -		work_done = 1;
  18.418 -	    }
  18.419 -        }
  18.420 -    }
  18.421 +		while (wc != wp) {
  18.422 +			sz = wp - wc;
  18.423 +			if (sz > (wbuf_size - WBUF_MASK(wc)))
  18.424 +				sz = wbuf_size - WBUF_MASK(wc);
  18.425 +			kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
  18.426 +			wc += sz;
  18.427 +			work_done = 1;
  18.428 +		}
  18.429 +	} else {
  18.430 +		while (x_char) {
  18.431 +			if (xencons_ring_send(&x_char, 1) == 1) {
  18.432 +				x_char = 0;
  18.433 +				work_done = 1;
  18.434 +			}
  18.435 +		}
  18.436  
  18.437 -    if ( work_done && (xencons_tty != NULL) )
  18.438 -    {
  18.439 -        wake_up_interruptible(&xencons_tty->write_wait);
  18.440 -        if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
  18.441 -             (xencons_tty->ldisc.write_wakeup != NULL) )
  18.442 -            (xencons_tty->ldisc.write_wakeup)(xencons_tty);
  18.443 -    }
  18.444 +		while (wc != wp) {
  18.445 +			int sent;
  18.446 +			sz = wp - wc;
  18.447 +			if (sz > (wbuf_size - WBUF_MASK(wc)))
  18.448 +				sz = wbuf_size - WBUF_MASK(wc);
  18.449 +			sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
  18.450 +			if (sent > 0) {
  18.451 +				wc += sent;
  18.452 +				work_done = 1;
  18.453 +			}
  18.454 +		}
  18.455 +	}
  18.456 +
  18.457 +	if (work_done && (xencons_tty != NULL))
  18.458 +	{
  18.459 +		wake_up_interruptible(&xencons_tty->write_wait);
  18.460 +		if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
  18.461 +		    (xencons_tty->ldisc.write_wakeup != NULL))
  18.462 +			(xencons_tty->ldisc.write_wakeup)(xencons_tty);
  18.463 +	}
  18.464  }
  18.465  
  18.466  /* Privileged receive callback and transmit kicker. */
  18.467  static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
  18.468                                            struct pt_regs *regs)
  18.469  {
  18.470 -    static char   rbuf[16];
  18.471 -    int           i, l;
  18.472 -    unsigned long flags;
  18.473 -
  18.474 -    spin_lock_irqsave(&xencons_lock, flags);
  18.475 +	static char   rbuf[16];
  18.476 +	int           i, l;
  18.477 +	unsigned long flags;
  18.478  
  18.479 -    if ( xencons_tty != NULL )
  18.480 -    {
  18.481 -        /* Receive work. */
  18.482 -        while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
  18.483 -            for ( i = 0; i < l; i++ )
  18.484 -                tty_insert_flip_char(xencons_tty, rbuf[i], 0);
  18.485 -        if ( xencons_tty->flip.count != 0 )
  18.486 -            tty_flip_buffer_push(xencons_tty);
  18.487 -    }
  18.488 +	spin_lock_irqsave(&xencons_lock, flags);
  18.489  
  18.490 -    /* Transmit work. */
  18.491 -    __xencons_tx_flush();
  18.492 +	if (xencons_tty != NULL)
  18.493 +	{
  18.494 +		/* Receive work. */
  18.495 +		while ((l = HYPERVISOR_console_io(
  18.496 +			CONSOLEIO_read, 16, rbuf)) > 0)
  18.497 +			for (i = 0; i < l; i++)
  18.498 +				tty_insert_flip_char(xencons_tty, rbuf[i], 0);
  18.499 +		if (xencons_tty->flip.count != 0)
  18.500 +			tty_flip_buffer_push(xencons_tty);
  18.501 +	}
  18.502  
  18.503 -    spin_unlock_irqrestore(&xencons_lock, flags);
  18.504 +	/* Transmit work. */
  18.505 +	__xencons_tx_flush();
  18.506  
  18.507 -    return IRQ_HANDLED;
  18.508 +	spin_unlock_irqrestore(&xencons_lock, flags);
  18.509 +
  18.510 +	return IRQ_HANDLED;
  18.511  }
  18.512  
  18.513  static int xencons_write_room(struct tty_struct *tty)
  18.514  {
  18.515 -    return wbuf_size - (wp - wc);
  18.516 +	return wbuf_size - (wp - wc);
  18.517  }
  18.518  
  18.519  static int xencons_chars_in_buffer(struct tty_struct *tty)
  18.520  {
  18.521 -    return wp - wc;
  18.522 +	return wp - wc;
  18.523  }
  18.524  
  18.525  static void xencons_send_xchar(struct tty_struct *tty, char ch)
  18.526  {
  18.527 -    unsigned long flags;
  18.528 +	unsigned long flags;
  18.529  
  18.530 -    if ( TTY_INDEX(tty) != 0 )
  18.531 -        return;
  18.532 +	if (TTY_INDEX(tty) != 0)
  18.533 +		return;
  18.534  
  18.535 -    spin_lock_irqsave(&xencons_lock, flags);
  18.536 -    x_char = ch;
  18.537 -    __xencons_tx_flush();
  18.538 -    spin_unlock_irqrestore(&xencons_lock, flags);
  18.539 +	spin_lock_irqsave(&xencons_lock, flags);
  18.540 +	x_char = ch;
  18.541 +	__xencons_tx_flush();
  18.542 +	spin_unlock_irqrestore(&xencons_lock, flags);
  18.543  }
  18.544  
  18.545  static void xencons_throttle(struct tty_struct *tty)
  18.546  {
  18.547 -    if ( TTY_INDEX(tty) != 0 )
  18.548 -        return;
  18.549 +	if (TTY_INDEX(tty) != 0)
  18.550 +		return;
  18.551  
  18.552 -    if ( I_IXOFF(tty) )
  18.553 -        xencons_send_xchar(tty, STOP_CHAR(tty));
  18.554 +	if (I_IXOFF(tty))
  18.555 +		xencons_send_xchar(tty, STOP_CHAR(tty));
  18.556  }
  18.557  
  18.558  static void xencons_unthrottle(struct tty_struct *tty)
  18.559  {
  18.560 -    if ( TTY_INDEX(tty) != 0 )
  18.561 -        return;
  18.562 +	if (TTY_INDEX(tty) != 0)
  18.563 +		return;
  18.564  
  18.565 -    if ( I_IXOFF(tty) )
  18.566 -    {
  18.567 -        if ( x_char != 0 )
  18.568 -            x_char = 0;
  18.569 -        else
  18.570 -            xencons_send_xchar(tty, START_CHAR(tty));
  18.571 -    }
  18.572 +	if (I_IXOFF(tty)) {
  18.573 +		if (x_char != 0)
  18.574 +			x_char = 0;
  18.575 +		else
  18.576 +			xencons_send_xchar(tty, START_CHAR(tty));
  18.577 +	}
  18.578  }
  18.579  
  18.580  static void xencons_flush_buffer(struct tty_struct *tty)
  18.581  {
  18.582 -    unsigned long flags;
  18.583 +	unsigned long flags;
  18.584  
  18.585 -    if ( TTY_INDEX(tty) != 0 )
  18.586 -        return;
  18.587 +	if (TTY_INDEX(tty) != 0)
  18.588 +		return;
  18.589  
  18.590 -    spin_lock_irqsave(&xencons_lock, flags);
  18.591 -    wc = wp = 0;
  18.592 -    spin_unlock_irqrestore(&xencons_lock, flags);
  18.593 +	spin_lock_irqsave(&xencons_lock, flags);
  18.594 +	wc = wp = 0;
  18.595 +	spin_unlock_irqrestore(&xencons_lock, flags);
  18.596  }
  18.597  
  18.598  static inline int __xencons_put_char(int ch)
  18.599  {
  18.600 -    char _ch = (char)ch;
  18.601 -    if ( (wp - wc) == wbuf_size )
  18.602 -        return 0;
  18.603 -    wbuf[WBUF_MASK(wp++)] = _ch;
  18.604 -    return 1;
  18.605 +	char _ch = (char)ch;
  18.606 +	if ((wp - wc) == wbuf_size)
  18.607 +		return 0;
  18.608 +	wbuf[WBUF_MASK(wp++)] = _ch;
  18.609 +	return 1;
  18.610  }
  18.611  
  18.612  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  18.613  static int xencons_write(
  18.614 -    struct tty_struct *tty,
  18.615 -    const unsigned char *buf,
  18.616 -    int count)
  18.617 +	struct tty_struct *tty,
  18.618 +	const unsigned char *buf,
  18.619 +	int count)
  18.620  {
  18.621 -    int i;
  18.622 -    unsigned long flags;
  18.623 -
  18.624 -    if ( TTY_INDEX(tty) != 0 )
  18.625 -        return count;
  18.626 +	int i;
  18.627 +	unsigned long flags;
  18.628  
  18.629 -    spin_lock_irqsave(&xencons_lock, flags);
  18.630 -
  18.631 -    for ( i = 0; i < count; i++ )
  18.632 -        if ( !__xencons_put_char(buf[i]) )
  18.633 -            break;
  18.634 +	if (TTY_INDEX(tty) != 0)
  18.635 +		return count;
  18.636  
  18.637 -    if ( i != 0 )
  18.638 -        __xencons_tx_flush();
  18.639 +	spin_lock_irqsave(&xencons_lock, flags);
  18.640  
  18.641 -    spin_unlock_irqrestore(&xencons_lock, flags);
  18.642 +	for (i = 0; i < count; i++)
  18.643 +		if (!__xencons_put_char(buf[i]))
  18.644 +			break;
  18.645  
  18.646 -    return i;
  18.647 +	if (i != 0)
  18.648 +		__xencons_tx_flush();
  18.649 +
  18.650 +	spin_unlock_irqrestore(&xencons_lock, flags);
  18.651 +
  18.652 +	return i;
  18.653  }
  18.654  #else
  18.655  static int xencons_write(
  18.656 -    struct tty_struct *tty, 
  18.657 -    int from_user,
  18.658 -    const u_char *buf, 
  18.659 -    int count)
  18.660 +	struct tty_struct *tty, 
  18.661 +	int from_user,
  18.662 +	const u_char *buf, 
  18.663 +	int count)
  18.664  {
  18.665 -    int i;
  18.666 -    unsigned long flags;
  18.667 -
  18.668 -    if ( from_user && verify_area(VERIFY_READ, buf, count) )
  18.669 -        return -EINVAL;
  18.670 -
  18.671 -    if ( TTY_INDEX(tty) != 0 )
  18.672 -        return count;
  18.673 -
  18.674 -    spin_lock_irqsave(&xencons_lock, flags);
  18.675 +	int i;
  18.676 +	unsigned long flags;
  18.677  
  18.678 -    for ( i = 0; i < count; i++ )
  18.679 -    {
  18.680 -        char ch;
  18.681 -        if ( from_user )
  18.682 -            __get_user(ch, buf + i);
  18.683 -        else
  18.684 -            ch = buf[i];
  18.685 -        if ( !__xencons_put_char(ch) )
  18.686 -            break;
  18.687 -    }
  18.688 +	if (from_user && verify_area(VERIFY_READ, buf, count))
  18.689 +		return -EINVAL;
  18.690  
  18.691 -    if ( i != 0 )
  18.692 -        __xencons_tx_flush();
  18.693 +	if (TTY_INDEX(tty) != 0)
  18.694 +		return count;
  18.695  
  18.696 -    spin_unlock_irqrestore(&xencons_lock, flags);
  18.697 +	spin_lock_irqsave(&xencons_lock, flags);
  18.698  
  18.699 -    return i;
  18.700 +	for (i = 0; i < count; i++) {
  18.701 +		char ch;
  18.702 +		if (from_user)
  18.703 +			__get_user(ch, buf + i);
  18.704 +		else
  18.705 +			ch = buf[i];
  18.706 +		if (!__xencons_put_char(ch))
  18.707 +			break;
  18.708 +	}
  18.709 +
  18.710 +	if (i != 0)
  18.711 +		__xencons_tx_flush();
  18.712 +
  18.713 +	spin_unlock_irqrestore(&xencons_lock, flags);
  18.714 +
  18.715 +	return i;
  18.716  }
  18.717  #endif
  18.718  
  18.719  static void xencons_put_char(struct tty_struct *tty, u_char ch)
  18.720  {
  18.721 -    unsigned long flags;
  18.722 +	unsigned long flags;
  18.723  
  18.724 -    if ( TTY_INDEX(tty) != 0 )
  18.725 -        return;
  18.726 +	if (TTY_INDEX(tty) != 0)
  18.727 +		return;
  18.728  
  18.729 -    spin_lock_irqsave(&xencons_lock, flags);
  18.730 -    (void)__xencons_put_char(ch);
  18.731 -    spin_unlock_irqrestore(&xencons_lock, flags);
  18.732 +	spin_lock_irqsave(&xencons_lock, flags);
  18.733 +	(void)__xencons_put_char(ch);
  18.734 +	spin_unlock_irqrestore(&xencons_lock, flags);
  18.735  }
  18.736  
  18.737  static void xencons_flush_chars(struct tty_struct *tty)
  18.738  {
  18.739 -    unsigned long flags;
  18.740 +	unsigned long flags;
  18.741  
  18.742 -    if ( TTY_INDEX(tty) != 0 )
  18.743 -        return;
  18.744 +	if (TTY_INDEX(tty) != 0)
  18.745 +		return;
  18.746  
  18.747 -    spin_lock_irqsave(&xencons_lock, flags);
  18.748 -    __xencons_tx_flush();
  18.749 -    spin_unlock_irqrestore(&xencons_lock, flags);    
  18.750 +	spin_lock_irqsave(&xencons_lock, flags);
  18.751 +	__xencons_tx_flush();
  18.752 +	spin_unlock_irqrestore(&xencons_lock, flags);    
  18.753  }
  18.754  
  18.755  static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
  18.756  {
  18.757 -    unsigned long orig_jiffies = jiffies;
  18.758 -
  18.759 -    if ( TTY_INDEX(tty) != 0 )
  18.760 -        return;
  18.761 +	unsigned long orig_jiffies = jiffies;
  18.762  
  18.763 -    while ( DRV(tty->driver)->chars_in_buffer(tty) )
  18.764 -    {
  18.765 -        set_current_state(TASK_INTERRUPTIBLE);
  18.766 -        schedule_timeout(1);
  18.767 -        if ( signal_pending(current) )
  18.768 -            break;
  18.769 -        if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
  18.770 -            break;
  18.771 -    }
  18.772 +	if (TTY_INDEX(tty) != 0)
  18.773 +		return;
  18.774 +
  18.775 +	while (DRV(tty->driver)->chars_in_buffer(tty))
  18.776 +	{
  18.777 +		set_current_state(TASK_INTERRUPTIBLE);
  18.778 +		schedule_timeout(1);
  18.779 +		if (signal_pending(current))
  18.780 +			break;
  18.781 +		if ( (timeout != 0) &&
  18.782 +		     time_after(jiffies, orig_jiffies + timeout) )
  18.783 +			break;
  18.784 +	}
  18.785      
  18.786 -    set_current_state(TASK_RUNNING);
  18.787 +	set_current_state(TASK_RUNNING);
  18.788  }
  18.789  
  18.790  static int xencons_open(struct tty_struct *tty, struct file *filp)
  18.791  {
  18.792 -    unsigned long flags;
  18.793 -
  18.794 -    if ( TTY_INDEX(tty) != 0 )
  18.795 -        return 0;
  18.796 +	unsigned long flags;
  18.797  
  18.798 -    spin_lock_irqsave(&xencons_lock, flags);
  18.799 -    tty->driver_data = NULL;
  18.800 -    if ( xencons_tty == NULL )
  18.801 -        xencons_tty = tty;
  18.802 -    __xencons_tx_flush();
  18.803 -    spin_unlock_irqrestore(&xencons_lock, flags);    
  18.804 +	if (TTY_INDEX(tty) != 0)
  18.805 +		return 0;
  18.806  
  18.807 -    return 0;
  18.808 +	spin_lock_irqsave(&xencons_lock, flags);
  18.809 +	tty->driver_data = NULL;
  18.810 +	if (xencons_tty == NULL)
  18.811 +		xencons_tty = tty;
  18.812 +	__xencons_tx_flush();
  18.813 +	spin_unlock_irqrestore(&xencons_lock, flags);    
  18.814 +
  18.815 +	return 0;
  18.816  }
  18.817  
  18.818  static void xencons_close(struct tty_struct *tty, struct file *filp)
  18.819  {
  18.820 -    unsigned long flags;
  18.821 -
  18.822 -    if ( TTY_INDEX(tty) != 0 )
  18.823 -        return;
  18.824 +	unsigned long flags;
  18.825  
  18.826 -    if ( tty->count == 1 )
  18.827 -    {
  18.828 -        tty->closing = 1;
  18.829 -        tty_wait_until_sent(tty, 0);
  18.830 -        if ( DRV(tty->driver)->flush_buffer != NULL )
  18.831 -            DRV(tty->driver)->flush_buffer(tty);
  18.832 -        if ( tty->ldisc.flush_buffer != NULL )
  18.833 -            tty->ldisc.flush_buffer(tty);
  18.834 -        tty->closing = 0;
  18.835 -        spin_lock_irqsave(&xencons_lock, flags);
  18.836 -        xencons_tty = NULL;
  18.837 -        spin_unlock_irqrestore(&xencons_lock, flags);    
  18.838 -    }
  18.839 +	if (TTY_INDEX(tty) != 0)
  18.840 +		return;
  18.841 +
  18.842 +	if (tty->count == 1) {
  18.843 +		tty->closing = 1;
  18.844 +		tty_wait_until_sent(tty, 0);
  18.845 +		if (DRV(tty->driver)->flush_buffer != NULL)
  18.846 +			DRV(tty->driver)->flush_buffer(tty);
  18.847 +		if (tty->ldisc.flush_buffer != NULL)
  18.848 +			tty->ldisc.flush_buffer(tty);
  18.849 +		tty->closing = 0;
  18.850 +		spin_lock_irqsave(&xencons_lock, flags);
  18.851 +		xencons_tty = NULL;
  18.852 +		spin_unlock_irqrestore(&xencons_lock, flags);    
  18.853 +	}
  18.854  }
  18.855  
  18.856  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  18.857  static struct tty_operations xencons_ops = {
  18.858 -    .open = xencons_open,
  18.859 -    .close = xencons_close,
  18.860 -    .write = xencons_write,
  18.861 -    .write_room = xencons_write_room,
  18.862 -    .put_char = xencons_put_char,
  18.863 -    .flush_chars = xencons_flush_chars,
  18.864 -    .chars_in_buffer = xencons_chars_in_buffer,
  18.865 -    .send_xchar = xencons_send_xchar,
  18.866 -    .flush_buffer = xencons_flush_buffer,
  18.867 -    .throttle = xencons_throttle,
  18.868 -    .unthrottle = xencons_unthrottle,
  18.869 -    .wait_until_sent = xencons_wait_until_sent,
  18.870 +	.open = xencons_open,
  18.871 +	.close = xencons_close,
  18.872 +	.write = xencons_write,
  18.873 +	.write_room = xencons_write_room,
  18.874 +	.put_char = xencons_put_char,
  18.875 +	.flush_chars = xencons_flush_chars,
  18.876 +	.chars_in_buffer = xencons_chars_in_buffer,
  18.877 +	.send_xchar = xencons_send_xchar,
  18.878 +	.flush_buffer = xencons_flush_buffer,
  18.879 +	.throttle = xencons_throttle,
  18.880 +	.unthrottle = xencons_unthrottle,
  18.881 +	.wait_until_sent = xencons_wait_until_sent,
  18.882  };
  18.883  
  18.884  #ifdef CONFIG_XEN_PRIVILEGED_GUEST
  18.885  static const char *xennullcon_startup(void)
  18.886  {
  18.887 -    return NULL;
  18.888 +	return NULL;
  18.889  }
  18.890  
  18.891  static int xennullcon_dummy(void)
  18.892  {
  18.893 -    return 0;
  18.894 +	return 0;
  18.895  }
  18.896  
  18.897  #define DUMMY (void *)xennullcon_dummy
  18.898 @@ -672,122 +665,128 @@ static int xennullcon_dummy(void)
  18.899   */
  18.900  
  18.901  const struct consw xennull_con = {
  18.902 -    .owner =		THIS_MODULE,
  18.903 -    .con_startup =	xennullcon_startup,
  18.904 -    .con_init =		DUMMY,
  18.905 -    .con_deinit =	DUMMY,
  18.906 -    .con_clear =	DUMMY,
  18.907 -    .con_putc =		DUMMY,
  18.908 -    .con_putcs =	DUMMY,
  18.909 -    .con_cursor =	DUMMY,
  18.910 -    .con_scroll =	DUMMY,
  18.911 -    .con_bmove =	DUMMY,
  18.912 -    .con_switch =	DUMMY,
  18.913 -    .con_blank =	DUMMY,
  18.914 -    .con_font_set =	DUMMY,
  18.915 -    .con_font_get =	DUMMY,
  18.916 -    .con_font_default =	DUMMY,
  18.917 -    .con_font_copy =	DUMMY,
  18.918 -    .con_set_palette =	DUMMY,
  18.919 -    .con_scrolldelta =	DUMMY,
  18.920 +	.owner =		THIS_MODULE,
  18.921 +	.con_startup =	xennullcon_startup,
  18.922 +	.con_init =		DUMMY,
  18.923 +	.con_deinit =	DUMMY,
  18.924 +	.con_clear =	DUMMY,
  18.925 +	.con_putc =		DUMMY,
  18.926 +	.con_putcs =	DUMMY,
  18.927 +	.con_cursor =	DUMMY,
  18.928 +	.con_scroll =	DUMMY,
  18.929 +	.con_bmove =	DUMMY,
  18.930 +	.con_switch =	DUMMY,
  18.931 +	.con_blank =	DUMMY,
  18.932 +	.con_font_set =	DUMMY,
  18.933 +	.con_font_get =	DUMMY,
  18.934 +	.con_font_default =	DUMMY,
  18.935 +	.con_font_copy =	DUMMY,
  18.936 +	.con_set_palette =	DUMMY,
  18.937 +	.con_scrolldelta =	DUMMY,
  18.938  };
  18.939  #endif
  18.940  #endif
  18.941  
  18.942  static int __init xencons_init(void)
  18.943  {
  18.944 -    int rc;
  18.945 -
  18.946 -    if ( xc_mode == XC_OFF )
  18.947 -        return 0;
  18.948 -
  18.949 -    xencons_ring_init();
  18.950 -
  18.951 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  18.952 -    xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
  18.953 -                                      1 : MAX_NR_CONSOLES);
  18.954 -    if ( xencons_driver == NULL )
  18.955 -        return -ENOMEM;
  18.956 -#else
  18.957 -    memset(&xencons_driver, 0, sizeof(struct tty_driver));
  18.958 -    xencons_driver.magic       = TTY_DRIVER_MAGIC;
  18.959 -    xencons_driver.refcount    = &xencons_refcount;
  18.960 -    xencons_driver.table       = xencons_table;
  18.961 -    xencons_driver.num         = (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
  18.962 -#endif
  18.963 +	int rc;
  18.964  
  18.965 -    DRV(xencons_driver)->major           = TTY_MAJOR;
  18.966 -    DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
  18.967 -    DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
  18.968 -    DRV(xencons_driver)->init_termios    = tty_std_termios;
  18.969 -    DRV(xencons_driver)->flags           = 
  18.970 -        TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
  18.971 -    DRV(xencons_driver)->termios         = xencons_termios;
  18.972 -    DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
  18.973 +	if (xc_mode == XC_OFF)
  18.974 +		return 0;
  18.975  
  18.976 -    if ( xc_mode == XC_SERIAL )
  18.977 -    {
  18.978 -        DRV(xencons_driver)->name        = "ttyS";
  18.979 -        DRV(xencons_driver)->minor_start = 64 + xc_num;
  18.980 -        DRV(xencons_driver)->name_base   = 0 + xc_num;
  18.981 -    }
  18.982 -    else
  18.983 -    {
  18.984 -        DRV(xencons_driver)->name        = "tty";
  18.985 -        DRV(xencons_driver)->minor_start = xc_num;
  18.986 -        DRV(xencons_driver)->name_base   = xc_num;
  18.987 -    }
  18.988 +	xencons_ring_init();
  18.989  
  18.990  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  18.991 -    tty_set_operations(xencons_driver, &xencons_ops);
  18.992 +	xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
  18.993 +					  1 : MAX_NR_CONSOLES);
  18.994 +	if (xencons_driver == NULL)
  18.995 +		return -ENOMEM;
  18.996  #else
  18.997 -    xencons_driver.open            = xencons_open;
  18.998 -    xencons_driver.close           = xencons_close;
  18.999 -    xencons_driver.write           = xencons_write;
 18.1000 -    xencons_driver.write_room      = xencons_write_room;
 18.1001 -    xencons_driver.put_char        = xencons_put_char;
 18.1002 -    xencons_driver.flush_chars     = xencons_flush_chars;
 18.1003 -    xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
 18.1004 -    xencons_driver.send_xchar      = xencons_send_xchar;
 18.1005 -    xencons_driver.flush_buffer    = xencons_flush_buffer;
 18.1006 -    xencons_driver.throttle        = xencons_throttle;
 18.1007 -    xencons_driver.unthrottle      = xencons_unthrottle;
 18.1008 -    xencons_driver.wait_until_sent = xencons_wait_until_sent;
 18.1009 +	memset(&xencons_driver, 0, sizeof(struct tty_driver));
 18.1010 +	xencons_driver.magic       = TTY_DRIVER_MAGIC;
 18.1011 +	xencons_driver.refcount    = &xencons_refcount;
 18.1012 +	xencons_driver.table       = xencons_table;
 18.1013 +	xencons_driver.num         =
 18.1014 +		(xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES;
 18.1015  #endif
 18.1016  
 18.1017 -    if ( (rc = tty_register_driver(DRV(xencons_driver))) != 0 )
 18.1018 -    {
 18.1019 -        printk("WARNING: Failed to register Xen virtual "
 18.1020 -               "console driver as '%s%d'\n",
 18.1021 -               DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
 18.1022 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 18.1023 -        put_tty_driver(xencons_driver);
 18.1024 -        xencons_driver = NULL;
 18.1025 -#endif
 18.1026 -        return rc;
 18.1027 -    }
 18.1028 +	DRV(xencons_driver)->major           = TTY_MAJOR;
 18.1029 +	DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
 18.1030 +	DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
 18.1031 +	DRV(xencons_driver)->init_termios    = tty_std_termios;
 18.1032 +	DRV(xencons_driver)->flags           = 
 18.1033 +		TTY_DRIVER_REAL_RAW |
 18.1034 +		TTY_DRIVER_RESET_TERMIOS |
 18.1035 +		TTY_DRIVER_NO_DEVFS;
 18.1036 +	DRV(xencons_driver)->termios         = xencons_termios;
 18.1037 +	DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
 18.1038 +
 18.1039 +	if (xc_mode == XC_SERIAL)
 18.1040 +	{
 18.1041 +		DRV(xencons_driver)->name        = "ttyS";
 18.1042 +		DRV(xencons_driver)->minor_start = 64 + xc_num;
 18.1043 +		DRV(xencons_driver)->name_base   = 0 + xc_num;
 18.1044 +	} else {
 18.1045 +		DRV(xencons_driver)->name        = "tty";
 18.1046 +		DRV(xencons_driver)->minor_start = xc_num;
 18.1047 +		DRV(xencons_driver)->name_base   = xc_num;
 18.1048 +	}
 18.1049  
 18.1050  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 18.1051 -    tty_register_device(xencons_driver, 0, NULL);
 18.1052 +	tty_set_operations(xencons_driver, &xencons_ops);
 18.1053 +#else
 18.1054 +	xencons_driver.open            = xencons_open;
 18.1055 +	xencons_driver.close           = xencons_close;
 18.1056 +	xencons_driver.write           = xencons_write;
 18.1057 +	xencons_driver.write_room      = xencons_write_room;
 18.1058 +	xencons_driver.put_char        = xencons_put_char;
 18.1059 +	xencons_driver.flush_chars     = xencons_flush_chars;
 18.1060 +	xencons_driver.chars_in_buffer = xencons_chars_in_buffer;
 18.1061 +	xencons_driver.send_xchar      = xencons_send_xchar;
 18.1062 +	xencons_driver.flush_buffer    = xencons_flush_buffer;
 18.1063 +	xencons_driver.throttle        = xencons_throttle;
 18.1064 +	xencons_driver.unthrottle      = xencons_unthrottle;
 18.1065 +	xencons_driver.wait_until_sent = xencons_wait_until_sent;
 18.1066  #endif
 18.1067  
 18.1068 -    if ( xen_start_info->flags & SIF_INITDOMAIN )
 18.1069 -    {
 18.1070 -        xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
 18.1071 -        (void)request_irq(xencons_priv_irq,
 18.1072 -                          xencons_priv_interrupt, 0, "console", NULL);
 18.1073 -    }
 18.1074 -    else
 18.1075 -    {
 18.1076 -	
 18.1077 -	xencons_ring_register_receiver(xencons_rx);
 18.1078 -    }
 18.1079 +	if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
 18.1080 +		printk("WARNING: Failed to register Xen virtual "
 18.1081 +		       "console driver as '%s%d'\n",
 18.1082 +		       DRV(xencons_driver)->name, DRV(xencons_driver)->name_base);
 18.1083 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 18.1084 +		put_tty_driver(xencons_driver);
 18.1085 +		xencons_driver = NULL;
 18.1086 +#endif
 18.1087 +		return rc;
 18.1088 +	}
 18.1089  
 18.1090 -    printk("Xen virtual console successfully installed as %s%d\n",
 18.1091 -           DRV(xencons_driver)->name,
 18.1092 -           DRV(xencons_driver)->name_base );
 18.1093 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 18.1094 +	tty_register_device(xencons_driver, 0, NULL);
 18.1095 +#endif
 18.1096 +
 18.1097 +	if (xen_start_info->flags & SIF_INITDOMAIN) {
 18.1098 +		xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
 18.1099 +		(void)request_irq(xencons_priv_irq,
 18.1100 +				  xencons_priv_interrupt, 0, "console", NULL);
 18.1101 +	} else {
 18.1102 +		xencons_ring_register_receiver(xencons_rx);
 18.1103 +	}
 18.1104 +
 18.1105 +	printk("Xen virtual console successfully installed as %s%d\n",
 18.1106 +	       DRV(xencons_driver)->name,
 18.1107 +	       DRV(xencons_driver)->name_base );
 18.1108      
 18.1109 -    return 0;
 18.1110 +	return 0;
 18.1111  }
 18.1112  
 18.1113  module_init(xencons_init);
 18.1114 +
 18.1115 +/*
 18.1116 + * Local variables:
 18.1117 + *  c-file-style: "linux"
 18.1118 + *  indent-tabs-mode: t
 18.1119 + *  c-indent-level: 8
 18.1120 + *  c-basic-offset: 8
 18.1121 + *  tab-width: 8
 18.1122 + * End:
 18.1123 + */
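[Editor's note: throughout console.c the write buffer uses free-running producer/consumer counters (wp/wc) over a power-of-two buffer, with WBUF_MASK() reducing an index modulo the buffer size; wp - wc is therefore the fill level even after the counters wrap. A self-contained sketch of the same idiom, mirroring __xencons_put_char() (names hypothetical):

	#define BUF_SIZE	4096			/* must be a power of two */
	#define BUF_MASK(_i)	((_i) & (BUF_SIZE-1))

	static char buf[BUF_SIZE];
	static unsigned int cons, prod;		/* free-running indices */

	static int put_byte(char c)
	{
		if ((prod - cons) == BUF_SIZE)
			return 0;		/* buffer full */
		buf[BUF_MASK(prod++)] = c;
		return 1;
	}
]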
    19.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Thu Sep 22 16:05:44 2005 +0100
    19.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Thu Sep 22 16:12:14 2005 +0100
    19.3 @@ -125,3 +125,13 @@ void xencons_resume(void)
    19.4  
    19.5  	(void)xencons_ring_init();
    19.6  }
    19.7 +
    19.8 +/*
    19.9 + * Local variables:
   19.10 + *  c-file-style: "linux"
   19.11 + *  indent-tabs-mode: t
   19.12 + *  c-indent-level: 8
   19.13 + *  c-basic-offset: 8
   19.14 + *  tab-width: 8
   19.15 + * End:
   19.16 + */
    20.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h	Thu Sep 22 16:05:44 2005 +0100
    20.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h	Thu Sep 22 16:12:14 2005 +0100
    20.3 @@ -3,12 +3,21 @@
    20.4  
    20.5  asmlinkage int xprintk(const char *fmt, ...);
    20.6  
    20.7 -
    20.8  int xencons_ring_init(void);
    20.9  int xencons_ring_send(const char *data, unsigned len);
   20.10  
   20.11 -typedef void (xencons_receiver_func)(char *buf, unsigned len, 
   20.12 -                                     struct pt_regs *regs);
   20.13 +typedef void (xencons_receiver_func)(
   20.14 +	char *buf, unsigned len, struct pt_regs *regs);
   20.15  void xencons_ring_register_receiver(xencons_receiver_func *f);
   20.16  
   20.17  #endif /* _XENCONS_RING_H */
   20.18 +
   20.19 +/*
   20.20 + * Local variables:
   20.21 + *  c-file-style: "linux"
   20.22 + *  indent-tabs-mode: t
   20.23 + *  c-indent-level: 8
   20.24 + *  c-basic-offset: 8
   20.25 + *  tab-width: 8
   20.26 + * End:
   20.27 + */
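[Editor's note: the typedef reflowed above is the input hook for non-privileged consoles; console.c registers xencons_rx through it. A minimal consumer would look like this (the handler and helper names are hypothetical):

	static void my_rx(char *buf, unsigned len, struct pt_regs *regs)
	{
		unsigned i;
		for (i = 0; i < len; i++)
			consume_byte(buf[i]);	/* assumed helper */
	}

	/* At init time, after xencons_ring_init(): */
	xencons_ring_register_receiver(my_rx);
]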
    21.1 --- a/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c	Thu Sep 22 16:05:44 2005 +0100
    21.2 +++ b/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c	Thu Sep 22 16:12:14 2005 +0100
    21.3 @@ -1,9 +1,9 @@
    21.4  /******************************************************************************
    21.5   * evtchn.c
    21.6   * 
    21.7 - * Xenolinux driver for receiving and demuxing event-channel signals.
    21.8 + * Driver for receiving and demuxing event-channel signals.
    21.9   * 
   21.10 - * Copyright (c) 2004, K A Fraser
   21.11 + * Copyright (c) 2004-2005, K A Fraser
   21.12   * Multi-process extensions Copyright (c) 2004, Steven Smith
   21.13   * 
   21.14   * This file may be distributed separately from the Linux kernel, or
   21.15 @@ -46,29 +46,18 @@
   21.16  #include <linux/init.h>
   21.17  #define XEN_EVTCHN_MASK_OPS
   21.18  #include <asm-xen/evtchn.h>
   21.19 -
   21.20 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   21.21 -#include <linux/devfs_fs_kernel.h>
   21.22 -#define OLD_DEVFS
   21.23 -#else
   21.24  #include <linux/gfp.h>
   21.25 -#endif
   21.26 -
   21.27 -#ifdef OLD_DEVFS
   21.28 -/* NB. This must be shared amongst drivers if more things go in /dev/xen */
   21.29 -static devfs_handle_t xen_dev_dir;
   21.30 -#endif
   21.31  
   21.32  struct per_user_data {
   21.33 -    /* Notification ring, accessed via /dev/xen/evtchn. */
   21.34 -#   define EVTCHN_RING_SIZE     2048  /* 2048 16-bit entries */
   21.35 -#   define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
   21.36 -    u16 *ring;
   21.37 -    unsigned int ring_cons, ring_prod, ring_overflow;
   21.38 +	/* Notification ring, accessed via /dev/xen/evtchn. */
   21.39 +#define EVTCHN_RING_SIZE     2048  /* 2048 16-bit entries */
   21.40 +#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
   21.41 +	u16 *ring;
   21.42 +	unsigned int ring_cons, ring_prod, ring_overflow;
   21.43  
   21.44 -    /* Processes wait on this queue when ring is empty. */
   21.45 -    wait_queue_head_t evtchn_wait;
   21.46 -    struct fasync_struct *evtchn_async_queue;
   21.47 +	/* Processes wait on this queue when ring is empty. */
   21.48 +	wait_queue_head_t evtchn_wait;
   21.49 +	struct fasync_struct *evtchn_async_queue;
   21.50  };
   21.51  
   21.52  /* Who's bound to each port? */
   21.53 @@ -77,356 +66,310 @@ static spinlock_t port_user_lock;
   21.54  
   21.55  void evtchn_device_upcall(int port)
   21.56  {
   21.57 -    struct per_user_data *u;
   21.58 -
   21.59 -    spin_lock(&port_user_lock);
   21.60 -
   21.61 -    mask_evtchn(port);
   21.62 -    clear_evtchn(port);
   21.63 +	struct per_user_data *u;
   21.64  
   21.65 -    if ( (u = port_user[port]) != NULL )
   21.66 -    {
   21.67 -        if ( (u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE )
   21.68 -        {
   21.69 -            u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
   21.70 -            if ( u->ring_cons == u->ring_prod++ )
   21.71 -            {
   21.72 -                wake_up_interruptible(&u->evtchn_wait);
   21.73 -                kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
   21.74 -            }
   21.75 -        }
   21.76 -        else
   21.77 -        {
   21.78 -            u->ring_overflow = 1;
   21.79 -        }
   21.80 -    }
   21.81 +	spin_lock(&port_user_lock);
   21.82  
   21.83 -    spin_unlock(&port_user_lock);
   21.84 +	mask_evtchn(port);
   21.85 +	clear_evtchn(port);
   21.86 +
   21.87 +	if ((u = port_user[port]) != NULL) {
   21.88 +		if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
   21.89 +			u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
   21.90 +			if (u->ring_cons == u->ring_prod++) {
   21.91 +				wake_up_interruptible(&u->evtchn_wait);
   21.92 +				kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
   21.93 +			}
   21.94 +		} else {
   21.95 +			u->ring_overflow = 1;
   21.96 +		}
   21.97 +	}
   21.98 +
   21.99 +	spin_unlock(&port_user_lock);
  21.100  }
  21.101  
  21.102  static ssize_t evtchn_read(struct file *file, char *buf,
  21.103                             size_t count, loff_t *ppos)
  21.104  {
  21.105 -    int rc;
  21.106 -    unsigned int c, p, bytes1 = 0, bytes2 = 0;
  21.107 -    DECLARE_WAITQUEUE(wait, current);
  21.108 -    struct per_user_data *u = file->private_data;
  21.109 -
  21.110 -    add_wait_queue(&u->evtchn_wait, &wait);
  21.111 -
  21.112 -    count &= ~1; /* even number of bytes */
  21.113 -
  21.114 -    if ( count == 0 )
  21.115 -    {
  21.116 -        rc = 0;
  21.117 -        goto out;
  21.118 -    }
  21.119 -
  21.120 -    if ( count > PAGE_SIZE )
  21.121 -        count = PAGE_SIZE;
  21.122 -
  21.123 -    for ( ; ; )
  21.124 -    {
  21.125 -        set_current_state(TASK_INTERRUPTIBLE);
  21.126 -
  21.127 -        if ( (c = u->ring_cons) != (p = u->ring_prod) )
  21.128 -            break;
  21.129 -
  21.130 -        if ( u->ring_overflow )
  21.131 -        {
  21.132 -            rc = -EFBIG;
  21.133 -            goto out;
  21.134 -        }
  21.135 -
  21.136 -        if ( file->f_flags & O_NONBLOCK )
  21.137 -        {
  21.138 -            rc = -EAGAIN;
  21.139 -            goto out;
  21.140 -        }
  21.141 +	int rc;
  21.142 +	unsigned int c, p, bytes1 = 0, bytes2 = 0;
  21.143 +	DECLARE_WAITQUEUE(wait, current);
  21.144 +	struct per_user_data *u = file->private_data;
  21.145  
  21.146 -        if ( signal_pending(current) )
  21.147 -        {
  21.148 -            rc = -ERESTARTSYS;
  21.149 -            goto out;
  21.150 -        }
  21.151 -
  21.152 -        schedule();
  21.153 -    }
  21.154 -
  21.155 -    /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
  21.156 -    if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 )
  21.157 -    {
  21.158 -        bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(u16);
  21.159 -        bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
  21.160 -    }
  21.161 -    else
  21.162 -    {
  21.163 -        bytes1 = (p - c) * sizeof(u16);
  21.164 -        bytes2 = 0;
  21.165 -    }
  21.166 +	add_wait_queue(&u->evtchn_wait, &wait);
  21.167  
  21.168 -    /* Truncate chunks according to caller's maximum byte count. */
  21.169 -    if ( bytes1 > count )
  21.170 -    {
  21.171 -        bytes1 = count;
  21.172 -        bytes2 = 0;
  21.173 -    }
  21.174 -    else if ( (bytes1 + bytes2) > count )
  21.175 -    {
  21.176 -        bytes2 = count - bytes1;
  21.177 -    }
  21.178 +	count &= ~1; /* even number of bytes */
  21.179  
  21.180 -    if ( copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
  21.181 -         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) )
  21.182 -    {
  21.183 -        rc = -EFAULT;
  21.184 -        goto out;
  21.185 -    }
  21.186 +	if (count == 0) {
  21.187 +		rc = 0;
  21.188 +		goto out;
  21.189 +	}
  21.190  
  21.191 -    u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
  21.192 +	if (count > PAGE_SIZE)
  21.193 +		count = PAGE_SIZE;
  21.194  
  21.195 -    rc = bytes1 + bytes2;
  21.196 +	for (;;) {
  21.197 +		set_current_state(TASK_INTERRUPTIBLE);
  21.198 +
  21.199 +		if ((c = u->ring_cons) != (p = u->ring_prod))
  21.200 +			break;
  21.201 +
  21.202 +		if (u->ring_overflow) {
  21.203 +			rc = -EFBIG;
  21.204 +			goto out;
  21.205 +		}
  21.206 +
  21.207 +		if (file->f_flags & O_NONBLOCK) {
  21.208 +			rc = -EAGAIN;
  21.209 +			goto out;
  21.210 +		}
  21.211 +
  21.212 +		if (signal_pending(current)) {
  21.213 +			rc = -ERESTARTSYS;
  21.214 +			goto out;
  21.215 +		}
  21.216 +
  21.217 +		schedule();
  21.218 +	}
  21.219 +
  21.220 +	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
  21.221 +	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
  21.222 +		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
  21.223 +			sizeof(u16);
  21.224 +		bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
  21.225 +	} else {
  21.226 +		bytes1 = (p - c) * sizeof(u16);
  21.227 +		bytes2 = 0;
  21.228 +	}
  21.229 +
  21.230 +	/* Truncate chunks according to caller's maximum byte count. */
  21.231 +	if (bytes1 > count) {
  21.232 +		bytes1 = count;
  21.233 +		bytes2 = 0;
  21.234 +	} else if ((bytes1 + bytes2) > count) {
  21.235 +		bytes2 = count - bytes1;
  21.236 +	}
  21.237 +
  21.238 +	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
  21.239 +	    ((bytes2 != 0) &&
  21.240 +	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) {
  21.241 +		rc = -EFAULT;
  21.242 +		goto out;
  21.243 +	}
  21.244 +
  21.245 +	u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
  21.246 +
  21.247 +	rc = bytes1 + bytes2;
  21.248  
  21.249   out:
  21.250 -    __set_current_state(TASK_RUNNING);
  21.251 -    remove_wait_queue(&u->evtchn_wait, &wait);
  21.252 -    return rc;
  21.253 +	__set_current_state(TASK_RUNNING);
  21.254 +	remove_wait_queue(&u->evtchn_wait, &wait);
  21.255 +	return rc;
  21.256  }
  21.257  
  21.258  static ssize_t evtchn_write(struct file *file, const char *buf,
  21.259                              size_t count, loff_t *ppos)
  21.260  {
  21.261 -    int  rc, i;
  21.262 -    u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
  21.263 -    struct per_user_data *u = file->private_data;
  21.264 -
  21.265 -    if ( kbuf == NULL )
  21.266 -        return -ENOMEM;
  21.267 -
  21.268 -    count &= ~1; /* even number of bytes */
  21.269 -
  21.270 -    if ( count == 0 )
  21.271 -    {
  21.272 -        rc = 0;
  21.273 -        goto out;
  21.274 -    }
  21.275 +	int  rc, i;
  21.276 +	u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
  21.277 +	struct per_user_data *u = file->private_data;
  21.278  
  21.279 -    if ( count > PAGE_SIZE )
  21.280 -        count = PAGE_SIZE;
  21.281 +	if (kbuf == NULL)
  21.282 +		return -ENOMEM;
  21.283  
  21.284 -    if ( copy_from_user(kbuf, buf, count) != 0 )
  21.285 -    {
  21.286 -        rc = -EFAULT;
  21.287 -        goto out;
  21.288 -    }
  21.289 +	count &= ~1; /* even number of bytes */
  21.290  
  21.291 -    spin_lock_irq(&port_user_lock);
  21.292 -    for ( i = 0; i < (count/2); i++ )
  21.293 -        if ( (kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u) )
  21.294 -            unmask_evtchn(kbuf[i]);
  21.295 -    spin_unlock_irq(&port_user_lock);
  21.296 +	if (count == 0) {
  21.297 +		rc = 0;
  21.298 +		goto out;
  21.299 +	}
  21.300  
  21.301 -    rc = count;
  21.302 +	if (count > PAGE_SIZE)
  21.303 +		count = PAGE_SIZE;
  21.304 +
  21.305 +	if (copy_from_user(kbuf, buf, count) != 0) {
  21.306 +		rc = -EFAULT;
  21.307 +		goto out;
  21.308 +	}
  21.309 +
  21.310 +	spin_lock_irq(&port_user_lock);
  21.311 +	for (i = 0; i < (count/2); i++)
  21.312 +		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
  21.313 +			unmask_evtchn(kbuf[i]);
  21.314 +	spin_unlock_irq(&port_user_lock);
  21.315 +
  21.316 +	rc = count;
  21.317  
  21.318   out:
  21.319 -    free_page((unsigned long)kbuf);
  21.320 -    return rc;
  21.321 +	free_page((unsigned long)kbuf);
  21.322 +	return rc;
  21.323  }
  21.324  
  21.325  static int evtchn_ioctl(struct inode *inode, struct file *file,
  21.326                          unsigned int cmd, unsigned long arg)
  21.327  {
  21.328 -    int rc = 0;
  21.329 -    struct per_user_data *u = file->private_data;
  21.330 -
  21.331 -    spin_lock_irq(&port_user_lock);
  21.332 -    
  21.333 -    switch ( cmd )
  21.334 -    {
  21.335 -    case EVTCHN_RESET:
  21.336 -        /* Initialise the ring to empty. Clear errors. */
  21.337 -        u->ring_cons = u->ring_prod = u->ring_overflow = 0;
  21.338 -        break;
  21.339 +	int rc = 0;
  21.340 +	struct per_user_data *u = file->private_data;
  21.341  
  21.342 -    case EVTCHN_BIND:
  21.343 -        if ( arg >= NR_EVENT_CHANNELS )
  21.344 -        {
  21.345 -            rc = -EINVAL;
  21.346 -        }
  21.347 -        else if ( port_user[arg] != NULL )
  21.348 -        {
  21.349 -            rc = -EISCONN;
  21.350 -        }
  21.351 -        else
  21.352 -        {
  21.353 -            port_user[arg] = u;
  21.354 -            unmask_evtchn(arg);
  21.355 -        }
  21.356 -        break;
  21.357 +	spin_lock_irq(&port_user_lock);
  21.358 +    
  21.359 +	switch (cmd) {
  21.360 +	case EVTCHN_RESET:
  21.361 +		/* Initialise the ring to empty. Clear errors. */
  21.362 +		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
  21.363 +		break;
  21.364  
  21.365 -    case EVTCHN_UNBIND:
  21.366 -        if ( arg >= NR_EVENT_CHANNELS )
  21.367 -        {
  21.368 -            rc = -EINVAL;
  21.369 -        }
  21.370 -        else if ( port_user[arg] != u )
  21.371 -        {
  21.372 -            rc = -ENOTCONN;
  21.373 -        }
  21.374 -        else
  21.375 -        {
  21.376 -            port_user[arg] = NULL;
  21.377 -            mask_evtchn(arg);
  21.378 -        }
  21.379 -        break;
  21.380 +	case EVTCHN_BIND:
  21.381 +		if (arg >= NR_EVENT_CHANNELS) {
  21.382 +			rc = -EINVAL;
  21.383 +		} else if (port_user[arg] != NULL) {
  21.384 +			rc = -EISCONN;
  21.385 +		} else {
  21.386 +			port_user[arg] = u;
  21.387 +			unmask_evtchn(arg);
  21.388 +		}
  21.389 +		break;
  21.390  
  21.391 -    default:
  21.392 -        rc = -ENOSYS;
  21.393 -        break;
  21.394 -    }
  21.395 +	case EVTCHN_UNBIND:
  21.396 +		if (arg >= NR_EVENT_CHANNELS) {
  21.397 +			rc = -EINVAL;
  21.398 +		} else if (port_user[arg] != u) {
  21.399 +			rc = -ENOTCONN;
  21.400 +		} else {
  21.401 +			port_user[arg] = NULL;
  21.402 +			mask_evtchn(arg);
  21.403 +		}
  21.404 +		break;
  21.405  
  21.406 -    spin_unlock_irq(&port_user_lock);   
  21.407 +	default:
  21.408 +		rc = -ENOSYS;
  21.409 +		break;
  21.410 +	}
  21.411  
  21.412 -    return rc;
   21.413 +	spin_unlock_irq(&port_user_lock);
  21.414 +
  21.415 +	return rc;
  21.416  }
  21.417  
  21.418  static unsigned int evtchn_poll(struct file *file, poll_table *wait)
  21.419  {
  21.420 -    unsigned int mask = POLLOUT | POLLWRNORM;
  21.421 -    struct per_user_data *u = file->private_data;
  21.422 +	unsigned int mask = POLLOUT | POLLWRNORM;
  21.423 +	struct per_user_data *u = file->private_data;
  21.424  
  21.425 -    poll_wait(file, &u->evtchn_wait, wait);
  21.426 -    if ( u->ring_cons != u->ring_prod )
  21.427 -        mask |= POLLIN | POLLRDNORM;
  21.428 -    if ( u->ring_overflow )
  21.429 -        mask = POLLERR;
  21.430 -    return mask;
  21.431 +	poll_wait(file, &u->evtchn_wait, wait);
  21.432 +	if (u->ring_cons != u->ring_prod)
  21.433 +		mask |= POLLIN | POLLRDNORM;
  21.434 +	if (u->ring_overflow)
  21.435 +		mask = POLLERR;
  21.436 +	return mask;
  21.437  }
  21.438  
  21.439  static int evtchn_fasync(int fd, struct file *filp, int on)
  21.440  {
  21.441 -    struct per_user_data *u = filp->private_data;
  21.442 -    return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
  21.443 +	struct per_user_data *u = filp->private_data;
  21.444 +	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
  21.445  }
  21.446  
  21.447  static int evtchn_open(struct inode *inode, struct file *filp)
  21.448  {
  21.449 -    struct per_user_data *u;
  21.450 -
  21.451 -    if ( (u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL )
  21.452 -        return -ENOMEM;
  21.453 -
  21.454 -    memset(u, 0, sizeof(*u));
  21.455 -    init_waitqueue_head(&u->evtchn_wait);
  21.456 +	struct per_user_data *u;
  21.457  
  21.458 -    if ( (u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
  21.459 -    {
  21.460 -        kfree(u);
  21.461 -        return -ENOMEM;
  21.462 -    }
  21.463 +	if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
  21.464 +		return -ENOMEM;
  21.465  
  21.466 -    filp->private_data = u;
  21.467 +	memset(u, 0, sizeof(*u));
  21.468 +	init_waitqueue_head(&u->evtchn_wait);
  21.469  
  21.470 -    return 0;
   21.471 +	u->ring = (u16 *)__get_free_page(GFP_KERNEL);
   21.472 +	if (u->ring == NULL) {
   21.473 +		kfree(u);
   21.474 +		return -ENOMEM;
   21.475 +	}
  21.476 +
  21.477 +	filp->private_data = u;
  21.478 +
  21.479 +	return 0;
  21.480  }
  21.481  
  21.482  static int evtchn_release(struct inode *inode, struct file *filp)
  21.483  {
  21.484 -    int i;
  21.485 -    struct per_user_data *u = filp->private_data;
  21.486 -
  21.487 -    spin_lock_irq(&port_user_lock);
  21.488 -
  21.489 -    free_page((unsigned long)u->ring);
  21.490 +	int i;
  21.491 +	struct per_user_data *u = filp->private_data;
  21.492  
  21.493 -    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
  21.494 -    {
  21.495 -        if ( port_user[i] == u )
  21.496 -        {
  21.497 -            port_user[i] = NULL;
  21.498 -            mask_evtchn(i);
  21.499 -        }
  21.500 -    }
  21.501 +	spin_lock_irq(&port_user_lock);
  21.502  
  21.503 -    spin_unlock_irq(&port_user_lock);
  21.504 +	free_page((unsigned long)u->ring);
  21.505  
  21.506 -    kfree(u);
   21.507 +	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
   21.508 +		if (port_user[i] == u) {
   21.509 +			port_user[i] = NULL;
   21.510 +			mask_evtchn(i);
   21.511 +		}
   21.512 +	}
  21.515  
  21.516 -    return 0;
  21.517 +	spin_unlock_irq(&port_user_lock);
  21.518 +
  21.519 +	kfree(u);
  21.520 +
  21.521 +	return 0;
  21.522  }
  21.523  
  21.524  static struct file_operations evtchn_fops = {
  21.525 -    .owner   = THIS_MODULE,
  21.526 -    .read    = evtchn_read,
  21.527 -    .write   = evtchn_write,
  21.528 -    .ioctl   = evtchn_ioctl,
  21.529 -    .poll    = evtchn_poll,
  21.530 -    .fasync  = evtchn_fasync,
  21.531 -    .open    = evtchn_open,
  21.532 -    .release = evtchn_release,
  21.533 +	.owner   = THIS_MODULE,
  21.534 +	.read    = evtchn_read,
  21.535 +	.write   = evtchn_write,
  21.536 +	.ioctl   = evtchn_ioctl,
  21.537 +	.poll    = evtchn_poll,
  21.538 +	.fasync  = evtchn_fasync,
  21.539 +	.open    = evtchn_open,
  21.540 +	.release = evtchn_release,
  21.541  };
  21.542  
  21.543  static struct miscdevice evtchn_miscdev = {
  21.544 -    .minor        = EVTCHN_MINOR,
  21.545 -    .name         = "evtchn",
  21.546 -    .fops         = &evtchn_fops,
  21.547 +	.minor        = EVTCHN_MINOR,
  21.548 +	.name         = "evtchn",
  21.549 +	.fops         = &evtchn_fops,
  21.550  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  21.551 -    .devfs_name   = "misc/evtchn",
  21.552 +	.devfs_name   = "misc/evtchn",
  21.553  #endif
  21.554  };
  21.555  
  21.556  static int __init evtchn_init(void)
  21.557  {
  21.558 -#ifdef OLD_DEVFS
  21.559 -    devfs_handle_t symlink_handle;
  21.560 -    int            pos;
  21.561 -    char           link_dest[64];
  21.562 -#endif
  21.563 -    int err;
  21.564 -
  21.565 -    spin_lock_init(&port_user_lock);
  21.566 -    memset(port_user, 0, sizeof(port_user));
  21.567 -
  21.568 -    /* (DEVFS) create '/dev/misc/evtchn'. */
  21.569 -    err = misc_register(&evtchn_miscdev);
  21.570 -    if ( err != 0 )
  21.571 -    {
  21.572 -        printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
  21.573 -        return err;
  21.574 -    }
  21.575 -
  21.576 -#ifdef OLD_DEVFS
  21.577 -    /* (DEVFS) create directory '/dev/xen'. */
  21.578 -    xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
  21.579 +	int err;
  21.580  
  21.581 -    /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
  21.582 -    pos = devfs_generate_path(evtchn_miscdev.devfs_handle, 
  21.583 -                              &link_dest[3], 
  21.584 -                              sizeof(link_dest) - 3);
  21.585 -    if ( pos >= 0 )
  21.586 -        strncpy(&link_dest[pos], "../", 3);
  21.587 +	spin_lock_init(&port_user_lock);
  21.588 +	memset(port_user, 0, sizeof(port_user));
  21.589  
  21.590 -    /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
  21.591 -    (void)devfs_mk_symlink(xen_dev_dir, 
  21.592 -                           "evtchn", 
  21.593 -                           DEVFS_FL_DEFAULT, 
  21.594 -                           &link_dest[pos],
  21.595 -                           &symlink_handle, 
  21.596 -                           NULL);
  21.597 +	/* (DEVFS) create '/dev/misc/evtchn'. */
  21.598 +	err = misc_register(&evtchn_miscdev);
   21.599 +	if (err != 0) {
   21.600 +		printk(KERN_ALERT
   21.601 +		       "Could not register /dev/misc/evtchn\n");
   21.602 +		return err;
   21.603 +	}
  21.604  
  21.605 -    /* (DEVFS) automatically destroy the symlink with its destination. */
  21.606 -    devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
  21.607 -#endif
   21.608 +	printk(KERN_INFO "Event-channel device installed.\n");
  21.609  
  21.610 -    printk("Event-channel device installed.\n");
  21.611 -
  21.612 -    return 0;
  21.613 +	return 0;
  21.614  }
  21.615  
  21.616  static void evtchn_cleanup(void)
  21.617  {
  21.618 -    misc_deregister(&evtchn_miscdev);
  21.619 +	misc_deregister(&evtchn_miscdev);
  21.620  }
  21.621  
  21.622  module_init(evtchn_init);
  21.623  module_exit(evtchn_cleanup);
  21.624 +
  21.625 +/*
  21.626 + * Local variables:
  21.627 + *  c-file-style: "linux"
  21.628 + *  indent-tabs-mode: t
  21.629 + *  c-indent-level: 8
  21.630 + *  c-basic-offset: 8
  21.631 + *  tab-width: 8
  21.632 + * End:
  21.633 + */
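
The user-space contract implied by evtchn_fops above is deliberately small: EVTCHN_BIND attaches an event-channel port to the open file, read() delivers fired ports as an array of u16s, and writing ports back unmasks them (see evtchn_write above). A minimal sketch of a consumer follows; the EVTCHN_* ioctl numbers are assumed to come from the exported evtchn header, and the device path follows the misc_register()/devfs names above (the node may surface elsewhere under udev).

/* Hypothetical consumer of the event-channel device.  EVTCHN_BIND and
 * EVTCHN_UNBIND are assumed to come from the exported evtchn header;
 * error handling is kept minimal. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>

int wait_one_event(int port)
{
	uint16_t pending;
	int fd = open("/dev/misc/evtchn", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, EVTCHN_BIND, port) != 0) {
		close(fd);
		return -1;
	}
	/* Blocks until a bound port fires; each event is one u16. */
	if (read(fd, &pending, sizeof(pending)) != sizeof(pending)) {
		close(fd);
		return -1;
	}
	/* Delivered ports stay masked until written back (evtchn_write). */
	(void)write(fd, &pending, sizeof(pending));
	ioctl(fd, EVTCHN_UNBIND, port);
	close(fd);
	return pending;
}
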
    22.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Thu Sep 22 16:05:44 2005 +0100
    22.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Thu Sep 22 16:12:14 2005 +0100
    22.3 @@ -62,9 +62,7 @@ typedef struct netif_st {
    22.4  	/* Private indexes into shared ring. */
    22.5  	NETIF_RING_IDX rx_req_cons;
    22.6  	NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
    22.7 -#ifdef CONFIG_XEN_NETDEV_GRANT
    22.8  	NETIF_RING_IDX rx_resp_prod_copy;
    22.9 -#endif
   22.10  	NETIF_RING_IDX tx_req_cons;
   22.11  	NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
   22.12  
    23.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Sep 22 16:05:44 2005 +0100
    23.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Sep 22 16:12:14 2005 +0100
    23.3 @@ -23,7 +23,7 @@ static void make_tx_response(netif_t *ne
    23.4  static int  make_rx_response(netif_t *netif, 
    23.5                               u16      id, 
    23.6                               s8       st,
    23.7 -                             unsigned long addr,
    23.8 +                             u16      offset,
    23.9                               u16      size,
   23.10                               u16      csum_valid);
   23.11  
   23.12 @@ -41,11 +41,7 @@ static struct sk_buff_head rx_queue;
   23.13  static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2+1];
   23.14  static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
   23.15  
   23.16 -#ifdef CONFIG_XEN_NETDEV_GRANT
   23.17  static gnttab_transfer_t grant_rx_op[MAX_PENDING_REQS];
   23.18 -#else
   23.19 -static struct mmuext_op rx_mmuext[NETIF_RX_RING_SIZE];
   23.20 -#endif
   23.21  static unsigned char rx_notify[NR_EVENT_CHANNELS];
   23.22  
   23.23  /* Don't currently gate addition of an interface to the tx scheduling list. */
   23.24 @@ -72,15 +68,10 @@ static PEND_RING_IDX dealloc_prod, deall
   23.25  
   23.26  static struct sk_buff_head tx_queue;
   23.27  
   23.28 -#ifdef CONFIG_XEN_NETDEV_GRANT
   23.29  static u16 grant_tx_ref[MAX_PENDING_REQS];
   23.30  static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
   23.31  static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
   23.32  
   23.33 -#else
   23.34 -static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
   23.35 -#endif
   23.36 -
   23.37  static struct list_head net_schedule_list;
   23.38  static spinlock_t net_schedule_list_lock;
   23.39  
   23.40 @@ -108,7 +99,7 @@ static unsigned long alloc_mfn(void)
   23.41  	return mfn;
   23.42  }
   23.43  
   23.44 -#ifndef CONFIG_XEN_NETDEV_GRANT
   23.45 +#if 0
   23.46  static void free_mfn(unsigned long mfn)
   23.47  {
   23.48  	unsigned long flags;
   23.49 @@ -180,18 +171,7 @@ int netif_be_start_xmit(struct sk_buff *
   23.50  		dev_kfree_skb(skb);
   23.51  		skb = nskb;
   23.52  	}
   23.53 -#ifdef CONFIG_XEN_NETDEV_GRANT
   23.54 -#ifdef DEBUG_GRANT
   23.55 -	printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d "
   23.56 -	       "id=%04x gr=%04x\n",
   23.57 -	       netif->rx->req_prod,
   23.58 -	       netif->rx_req_cons,
   23.59 -	       netif->rx->ring[
   23.60 -		       MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
   23.61 -	       netif->rx->ring[
   23.62 -		       MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
   23.63 -#endif
   23.64 -#endif
   23.65 +
   23.66  	netif->rx_req_cons++;
   23.67  	netif_get(netif);
   23.68  
   23.69 @@ -232,11 +212,7 @@ static void net_rx_action(unsigned long 
   23.70  	u16 size, id, evtchn;
   23.71  	multicall_entry_t *mcl;
   23.72  	mmu_update_t *mmu;
   23.73 -#ifdef CONFIG_XEN_NETDEV_GRANT
   23.74  	gnttab_transfer_t *gop;
   23.75 -#else
   23.76 -	struct mmuext_op *mmuext;
   23.77 -#endif
   23.78  	unsigned long vdata, old_mfn, new_mfn;
   23.79  	struct sk_buff_head rxq;
   23.80  	struct sk_buff *skb;
   23.81 @@ -247,11 +223,7 @@ static void net_rx_action(unsigned long 
   23.82  
   23.83  	mcl = rx_mcl;
   23.84  	mmu = rx_mmu;
   23.85 -#ifdef CONFIG_XEN_NETDEV_GRANT
   23.86  	gop = grant_rx_op;
   23.87 -#else
   23.88 -	mmuext = rx_mmuext;
   23.89 -#endif
   23.90  
   23.91  	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
   23.92  		netif   = netdev_priv(skb->dev);
   23.93 @@ -277,25 +249,13 @@ static void net_rx_action(unsigned long 
   23.94  					pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
   23.95  		mcl++;
   23.96  
   23.97 -#ifdef CONFIG_XEN_NETDEV_GRANT
   23.98  		gop->mfn = old_mfn;
   23.99  		gop->domid = netif->domid;
  23.100  		gop->ref = netif->rx->ring[
  23.101  			MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
  23.102  		netif->rx_resp_prod_copy++;
  23.103  		gop++;
  23.104 -#else
  23.105 -		mcl->op = __HYPERVISOR_mmuext_op;
  23.106 -		mcl->args[0] = (unsigned long)mmuext;
  23.107 -		mcl->args[1] = 1;
  23.108 -		mcl->args[2] = 0;
  23.109 -		mcl->args[3] = netif->domid;
  23.110 -		mcl++;
  23.111  
  23.112 -		mmuext->cmd = MMUEXT_REASSIGN_PAGE;
  23.113 -		mmuext->arg1.mfn = old_mfn;
  23.114 -		mmuext++;
  23.115 -#endif
  23.116  		mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
  23.117  			MMU_MACHPHYS_UPDATE;
  23.118  		mmu->val = __pa(vdata) >> PAGE_SHIFT;  
  23.119 @@ -303,9 +263,6 @@ static void net_rx_action(unsigned long 
  23.120  
  23.121  		__skb_queue_tail(&rxq, skb);
  23.122  
  23.123 -#ifdef DEBUG_GRANT
  23.124 -		dump_packet('a', old_mfn, vdata);
  23.125 -#endif
  23.126  		/* Filled the batch queue? */
  23.127  		if ((mcl - rx_mcl) == ARRAY_SIZE(rx_mcl))
  23.128  			break;
  23.129 @@ -321,17 +278,12 @@ static void net_rx_action(unsigned long 
  23.130  	mcl->args[3] = DOMID_SELF;
  23.131  	mcl++;
  23.132  
  23.133 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.134  	mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
  23.135 -#else
  23.136 -	mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
  23.137 -#endif
  23.138  	BUG_ON(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0);
  23.139  
  23.140  	mcl = rx_mcl;
  23.141 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.142 -	if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
  23.143 -				     gop - grant_rx_op)) { 
   23.144 +	if (HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
   23.145 +				      gop - grant_rx_op)) {
  23.146  		/*
  23.147  		 * The other side has given us a bad grant ref, or has no 
  23.148  		 * headroom, or has gone away. Unfortunately the current grant
  23.149 @@ -343,20 +295,14 @@ static void net_rx_action(unsigned long 
  23.150  			grant_rx_op[0].domid, gop - grant_rx_op); 
  23.151  	}
  23.152  	gop = grant_rx_op;
  23.153 -#else
  23.154 -	mmuext = rx_mmuext;
  23.155 -#endif
  23.156 +
  23.157  	while ((skb = __skb_dequeue(&rxq)) != NULL) {
  23.158  		netif   = netdev_priv(skb->dev);
  23.159  		size    = skb->tail - skb->data;
  23.160  
  23.161  		/* Rederive the machine addresses. */
  23.162  		new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
  23.163 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.164  		old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
  23.165 -#else
  23.166 -		old_mfn = mmuext[0].arg1.mfn;
  23.167 -#endif
  23.168  		atomic_set(&(skb_shinfo(skb)->dataref), 1);
  23.169  		skb_shinfo(skb)->nr_frags = 0;
  23.170  		skb_shinfo(skb)->frag_list = NULL;
  23.171 @@ -369,27 +315,17 @@ static void net_rx_action(unsigned long 
  23.172  
  23.173  		/* Check the reassignment error code. */
  23.174  		status = NETIF_RSP_OKAY;
  23.175 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.176  		if(gop->status != 0) { 
  23.177  			DPRINTK("Bad status %d from grant transfer to DOM%u\n",
  23.178  				gop->status, netif->domid);
  23.179  			/* XXX SMH: should free 'old_mfn' here */
  23.180  			status = NETIF_RSP_ERROR; 
  23.181  		} 
  23.182 -#else
  23.183 -		if (unlikely(mcl[1].result != 0)) {
  23.184 -			DPRINTK("Failed MMU update transferring to DOM%u\n",
  23.185 -				netif->domid);
  23.186 -			free_mfn(old_mfn);
  23.187 -			status = NETIF_RSP_ERROR;
  23.188 -		}
  23.189 -#endif
  23.190  		evtchn = netif->evtchn;
  23.191  		id = netif->rx->ring[
  23.192  			MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
  23.193  		if (make_rx_response(netif, id, status,
  23.194 -				     (old_mfn << PAGE_SHIFT) | /* XXX */
  23.195 -				     ((unsigned long)skb->data & ~PAGE_MASK),
  23.196 +				     (unsigned long)skb->data & ~PAGE_MASK,
  23.197  				     size, skb->proto_csum_valid) &&
  23.198  		    (rx_notify[evtchn] == 0)) {
  23.199  			rx_notify[evtchn] = 1;
  23.200 @@ -398,13 +334,8 @@ static void net_rx_action(unsigned long 
  23.201  
  23.202  		netif_put(netif);
  23.203  		dev_kfree_skb(skb);
  23.204 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.205  		mcl++;
  23.206  		gop++;
  23.207 -#else
  23.208 -		mcl += 2;
  23.209 -		mmuext += 1;
  23.210 -#endif
  23.211  	}
  23.212  
  23.213  	while (notify_nr != 0) {
  23.214 @@ -486,11 +417,7 @@ static void tx_credit_callback(unsigned 
  23.215  
  23.216  inline static void net_tx_action_dealloc(void)
  23.217  {
  23.218 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.219  	gnttab_unmap_grant_ref_t *gop;
  23.220 -#else
  23.221 -	multicall_entry_t *mcl;
  23.222 -#endif
  23.223  	u16 pending_idx;
  23.224  	PEND_RING_IDX dc, dp;
  23.225  	netif_t *netif;
  23.226 @@ -498,7 +425,6 @@ inline static void net_tx_action_dealloc
  23.227  	dc = dealloc_cons;
  23.228  	dp = dealloc_prod;
  23.229  
  23.230 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.231  	/*
  23.232  	 * Free up any grants we have finished using
  23.233  	 */
  23.234 @@ -513,26 +439,8 @@ inline static void net_tx_action_dealloc
  23.235  	}
  23.236  	BUG_ON(HYPERVISOR_grant_table_op(
  23.237  		GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
  23.238 -#else
  23.239 -	mcl = tx_mcl;
  23.240 -	while (dc != dp) {
  23.241 -		pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
  23.242 -		MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
  23.243 -					__pte(0), 0);
  23.244 -		mcl++;     
  23.245 -	}
  23.246  
  23.247 -	mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
  23.248 -	BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
  23.249 -
  23.250 -	mcl = tx_mcl;
  23.251 -#endif
  23.252  	while (dealloc_cons != dp) {
  23.253 -#ifndef CONFIG_XEN_NETDEV_GRANT
  23.254 -		/* The update_va_mapping() must not fail. */
  23.255 -		BUG_ON(mcl[0].result != 0);
  23.256 -#endif
  23.257 -
  23.258  		pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
  23.259  
  23.260  		netif = pending_tx_info[pending_idx].netif;
  23.261 @@ -556,10 +464,6 @@ inline static void net_tx_action_dealloc
  23.262  			add_to_net_schedule_list_tail(netif);
  23.263          
  23.264  		netif_put(netif);
  23.265 -
  23.266 -#ifndef CONFIG_XEN_NETDEV_GRANT
  23.267 -		mcl++;
  23.268 -#endif
  23.269  	}
  23.270  }
  23.271  
  23.272 @@ -572,21 +476,13 @@ static void net_tx_action(unsigned long 
  23.273  	netif_tx_request_t txreq;
  23.274  	u16 pending_idx;
  23.275  	NETIF_RING_IDX i;
  23.276 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.277  	gnttab_map_grant_ref_t *mop;
  23.278 -#else
  23.279 -	multicall_entry_t *mcl;
  23.280 -#endif
  23.281  	unsigned int data_len;
  23.282  
  23.283  	if (dealloc_cons != dealloc_prod)
  23.284  		net_tx_action_dealloc();
  23.285  
  23.286 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.287  	mop = tx_map_ops;
  23.288 -#else
  23.289 -	mcl = tx_mcl;
  23.290 -#endif
  23.291  	while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
  23.292  		!list_empty(&net_schedule_list)) {
  23.293  		/* Get a netif from the list with work to do. */
  23.294 @@ -657,8 +553,7 @@ static void net_tx_action(unsigned long 
  23.295  		}
  23.296  
  23.297  		/* No crossing a page as the payload mustn't fragment. */
  23.298 -		if (unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >=
  23.299 -			     PAGE_SIZE)) {
  23.300 +		if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) {
   23.301  			DPRINTK("txreq.offset: %u, size: %u, end: %lu\n",
   23.302  				txreq.offset, txreq.size,
   23.303  				(unsigned long)txreq.offset + txreq.size);
  23.304 @@ -682,20 +577,12 @@ static void net_tx_action(unsigned long 
  23.305  
  23.306  		/* Packets passed to netif_rx() must have some headroom. */
  23.307  		skb_reserve(skb, 16);
  23.308 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.309 +
  23.310  		mop->host_addr = MMAP_VADDR(pending_idx);
  23.311  		mop->dom       = netif->domid;
  23.312 -		mop->ref       = txreq.addr >> PAGE_SHIFT;
  23.313 +		mop->ref       = txreq.gref;
  23.314  		mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
  23.315  		mop++;
  23.316 -#else
  23.317 -		MULTI_update_va_mapping_otherdomain(
  23.318 -			mcl, MMAP_VADDR(pending_idx),
  23.319 -			pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
  23.320 -			0, netif->domid);
  23.321 -
  23.322 -		mcl++;
  23.323 -#endif
  23.324  
  23.325  		memcpy(&pending_tx_info[pending_idx].req,
  23.326  		       &txreq, sizeof(txreq));
  23.327 @@ -706,17 +593,10 @@ static void net_tx_action(unsigned long 
  23.328  
  23.329  		pending_cons++;
  23.330  
  23.331 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.332  		if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
  23.333  			break;
  23.334 -#else
  23.335 -		/* Filled the batch queue? */
  23.336 -		if ((mcl - tx_mcl) == ARRAY_SIZE(tx_mcl))
  23.337 -			break;
  23.338 -#endif
  23.339  	}
  23.340  
  23.341 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.342  	if (mop == tx_map_ops)
  23.343  		return;
  23.344  
  23.345 @@ -724,14 +604,6 @@ static void net_tx_action(unsigned long 
  23.346  		GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
  23.347  
  23.348  	mop = tx_map_ops;
  23.349 -#else
  23.350 -	if (mcl == tx_mcl)
  23.351 -		return;
  23.352 -
  23.353 -	BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
  23.354 -
  23.355 -	mcl = tx_mcl;
  23.356 -#endif
  23.357  	while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
  23.358  		pending_idx = *((u16 *)skb->data);
  23.359  		netif       = pending_tx_info[pending_idx].netif;
  23.360 @@ -739,7 +611,6 @@ static void net_tx_action(unsigned long 
  23.361  		       sizeof(txreq));
  23.362  
  23.363  		/* Check the remap error code. */
  23.364 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.365  		if (unlikely(mop->handle < 0)) {
  23.366  			printk(KERN_ALERT "#### netback grant fails\n");
  23.367  			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  23.368 @@ -754,30 +625,13 @@ static void net_tx_action(unsigned long 
  23.369  			__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
  23.370  			FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
  23.371  		grant_tx_ref[pending_idx] = mop->handle;
  23.372 -#else
  23.373 -		if (unlikely(mcl[0].result != 0)) {
  23.374 -			DPRINTK("Bad page frame\n");
  23.375 -			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  23.376 -			netif_put(netif);
  23.377 -			kfree_skb(skb);
  23.378 -			mcl++;
  23.379 -			pending_ring[MASK_PEND_IDX(pending_prod++)] =
  23.380 -				pending_idx;
  23.381 -			continue;
  23.382 -		}
  23.383 -
  23.384 -		phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >>
  23.385 -				       PAGE_SHIFT] =
  23.386 -			FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
  23.387 -#endif
  23.388  
  23.389  		data_len = (txreq.size > PKT_PROT_LEN) ?
  23.390  			PKT_PROT_LEN : txreq.size;
  23.391  
  23.392  		__skb_put(skb, data_len);
  23.393  		memcpy(skb->data, 
  23.394 -		       (void *)(MMAP_VADDR(pending_idx)|
  23.395 -				(txreq.addr&~PAGE_MASK)),
  23.396 +		       (void *)(MMAP_VADDR(pending_idx)|txreq.offset),
  23.397  		       data_len);
  23.398  		if (data_len < txreq.size) {
  23.399  			/* Append the packet payload as a fragment. */
  23.400 @@ -786,7 +640,7 @@ static void net_tx_action(unsigned long 
  23.401  			skb_shinfo(skb)->frags[0].size        =
  23.402  				txreq.size - data_len;
  23.403  			skb_shinfo(skb)->frags[0].page_offset = 
  23.404 -				(txreq.addr + data_len) & ~PAGE_MASK;
  23.405 +				txreq.offset + data_len;
  23.406  			skb_shinfo(skb)->nr_frags = 1;
  23.407  		} else {
  23.408  			/* Schedule a response immediately. */
  23.409 @@ -813,11 +667,7 @@ static void net_tx_action(unsigned long 
  23.410  		netif_rx(skb);
  23.411  		netif->dev->last_rx = jiffies;
  23.412  
  23.413 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.414  		mop++;
  23.415 -#else
  23.416 -		mcl++;
  23.417 -#endif
  23.418  	}
  23.419  }
  23.420  
  23.421 @@ -874,7 +724,7 @@ static void make_tx_response(netif_t *ne
  23.422  static int make_rx_response(netif_t *netif, 
  23.423                              u16      id, 
  23.424                              s8       st,
  23.425 -                            unsigned long addr,
  23.426 +                            u16      offset,
  23.427                              u16      size,
  23.428                              u16      csum_valid)
  23.429  {
  23.430 @@ -882,7 +732,7 @@ static int make_rx_response(netif_t *net
  23.431  	netif_rx_response_t *resp;
  23.432  
  23.433  	resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
  23.434 -	resp->addr       = addr;
  23.435 +	resp->offset     = offset;
  23.436  	resp->csum_valid = csum_valid;
  23.437  	resp->id         = id;
  23.438  	resp->status     = (s16)size;
  23.439 @@ -937,9 +787,6 @@ static int __init netback_init(void)
  23.440  		return 0;
  23.441  
  23.442  	IPRINTK("Initialising Xen netif backend.\n");
  23.443 -#ifdef CONFIG_XEN_NETDEV_GRANT
  23.444 -	IPRINTK("Using grant tables.\n");
  23.445 -#endif
  23.446  
  23.447  	/* We can increase reservation by this much in net_rx_action(). */
  23.448  	balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
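
With CONFIG_XEN_NETDEV_GRANT gone, the grant-table path is the only path, and the ring entries no longer carry machine addresses: a transmit request names a grant reference plus a byte offset into the granted page (which is why net_tx_action() above now rejects txreq.offset + txreq.size reaching past the page), and a receive response carries only the in-page offset. The authoritative layouts live in xen/include/public/io/netif.h, also touched by this changeset; what follows is only an illustrative sketch of the fields netback.c uses, with field order and exact types as assumptions.

/* Illustrative only; see xen/include/public/io/netif.h for the real
 * definitions.  The fields shown are exactly those netback.c touches. */
typedef struct netif_tx_request {
	grant_ref_t gref;	/* grant for the frame page (was a machine address) */
	u16 offset;		/* byte offset of the data within that page */
	u16 size;		/* total packet size in bytes */
	u16 id;			/* echoed back in the tx response */
	u16 csum_blank;		/* frontend left the checksum blank (CHECKSUM_HW) */
} netif_tx_request_t;

typedef struct netif_rx_response {
	u16 id;
	u16 offset;		/* in-page offset of the received data */
	u16 csum_valid;
	s16 status;		/* negative: NETIF_RSP_ERROR; else byte count */
} netif_rx_response_t;
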
    24.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Sep 22 16:05:44 2005 +0100
    24.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Sep 22 16:12:14 2005 +0100
    24.3 @@ -256,8 +256,8 @@ static void network_tx_buf_gc(struct net
    24.4  		for (i = np->tx_resp_cons; i != prod; i++) {
    24.5  			id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
    24.6  			skb = np->tx_skbs[id];
    24.7 -#ifdef CONFIG_XEN_NETDEV_GRANT
    24.8 -			if (unlikely(gnttab_query_foreign_access(np->grant_tx_ref[id]) != 0)) {
    24.9 +			if (unlikely(gnttab_query_foreign_access(
   24.10 +				np->grant_tx_ref[id]) != 0)) {
   24.11  				printk(KERN_ALERT "network_tx_buf_gc: warning "
   24.12  				       "-- grant still in use by backend "
   24.13  				       "domain.\n");
   24.14 @@ -268,7 +268,6 @@ static void network_tx_buf_gc(struct net
   24.15  			gnttab_release_grant_reference(
   24.16  				&np->gref_tx_head, np->grant_tx_ref[id]);
   24.17  			np->grant_tx_ref[id] = GRANT_INVALID_REF;
   24.18 -#endif
   24.19  			ADD_ID_TO_FREELIST(np->tx_skbs, id);
   24.20  			dev_kfree_skb_irq(skb);
   24.21  		}
   24.22 @@ -287,10 +286,7 @@ static void network_tx_buf_gc(struct net
   24.23  		mb();
   24.24  	} while (prod != np->tx->resp_prod);
   24.25  
   24.26 -#ifdef CONFIG_XEN_NETDEV_GRANT
   24.27   out: 
   24.28 -#endif
   24.29 -
   24.30  	if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
   24.31  		np->tx_full = 0;
   24.32  		if (np->user_state == UST_OPEN)
   24.33 @@ -307,9 +303,7 @@ static void network_alloc_rx_buffers(str
   24.34  	int i, batch_target;
   24.35  	NETIF_RING_IDX req_prod = np->rx->req_prod;
   24.36  	struct xen_memory_reservation reservation;
   24.37 -#ifdef CONFIG_XEN_NETDEV_GRANT
   24.38  	grant_ref_t ref;
   24.39 -#endif
   24.40  
   24.41  	if (unlikely(np->backend_state != BEST_CONNECTED))
   24.42  		return;
   24.43 @@ -343,13 +337,11 @@ static void network_alloc_rx_buffers(str
   24.44  		np->rx_skbs[id] = skb;
   24.45          
   24.46  		np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
   24.47 -#ifdef CONFIG_XEN_NETDEV_GRANT
   24.48  		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
   24.49  		BUG_ON((signed short)ref < 0);
   24.50  		np->grant_rx_ref[id] = ref;
   24.51  		gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
   24.52  		np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
   24.53 -#endif
   24.54  		rx_pfn_array[i] = virt_to_mfn(skb->head);
   24.55  
   24.56  		/* Remove this page from map before passing back to Xen. */
   24.57 @@ -400,10 +392,8 @@ static int network_start_xmit(struct sk_
   24.58  	struct net_private *np = netdev_priv(dev);
   24.59  	netif_tx_request_t *tx;
   24.60  	NETIF_RING_IDX i;
   24.61 -#ifdef CONFIG_XEN_NETDEV_GRANT
   24.62  	grant_ref_t ref;
   24.63  	unsigned long mfn;
   24.64 -#endif
   24.65  
   24.66  	if (unlikely(np->tx_full)) {
   24.67  		printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
   24.68 @@ -439,18 +429,13 @@ static int network_start_xmit(struct sk_
   24.69  	tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
   24.70  
   24.71  	tx->id   = id;
   24.72 -#ifdef CONFIG_XEN_NETDEV_GRANT
   24.73  	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
   24.74  	BUG_ON((signed short)ref < 0);
   24.75  	mfn = virt_to_mfn(skb->data);
   24.76  	gnttab_grant_foreign_access_ref(
   24.77  		ref, np->backend_id, mfn, GNTMAP_readonly);
   24.78 -	tx->addr = ref << PAGE_SHIFT;
   24.79 -	np->grant_tx_ref[id] = ref;
   24.80 -#else
   24.81 -	tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
   24.82 -#endif
   24.83 -	tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
   24.84 +	tx->gref = np->grant_tx_ref[id] = ref;
   24.85 +	tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
   24.86  	tx->size = skb->len;
   24.87  	tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
   24.88  
   24.89 @@ -511,10 +496,8 @@ static int netif_poll(struct net_device 
   24.90  	int work_done, budget, more_to_do = 1;
   24.91  	struct sk_buff_head rxq;
   24.92  	unsigned long flags;
   24.93 -#ifdef CONFIG_XEN_NETDEV_GRANT
   24.94  	unsigned long mfn;
   24.95  	grant_ref_t ref;
   24.96 -#endif
   24.97  
   24.98  	spin_lock(&np->rx_lock);
   24.99  
  24.100 @@ -550,7 +533,6 @@ static int netif_poll(struct net_device 
  24.101  			continue;
  24.102  		}
  24.103  
  24.104 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.105  		ref = np->grant_rx_ref[rx->id]; 
  24.106  
  24.107  		if(ref == GRANT_INVALID_REF) { 
  24.108 @@ -568,17 +550,12 @@ static int netif_poll(struct net_device 
  24.109  		np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
  24.110  		mfn = gnttab_end_foreign_transfer_ref(ref);
  24.111  		gnttab_release_grant_reference(&np->gref_rx_head, ref);
  24.112 -#endif
  24.113  
  24.114  		skb = np->rx_skbs[rx->id];
  24.115  		ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
  24.116  
  24.117  		/* NB. We handle skb overflow later. */
  24.118 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.119 -		skb->data = skb->head + rx->addr;
  24.120 -#else
  24.121 -		skb->data = skb->head + (rx->addr & ~PAGE_MASK);
  24.122 -#endif
  24.123 +		skb->data = skb->head + rx->offset;
  24.124  		skb->len  = rx->status;
  24.125  		skb->tail = skb->data + skb->len;
  24.126  
  24.127 @@ -589,30 +566,14 @@ static int netif_poll(struct net_device 
  24.128  		np->stats.rx_bytes += rx->status;
  24.129  
  24.130  		/* Remap the page. */
  24.131 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.132  		mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
  24.133 -#else
  24.134 -		mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
  24.135 -#endif
  24.136  		mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
  24.137  		mmu++;
  24.138 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.139  		MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
  24.140  					pfn_pte_ma(mfn, PAGE_KERNEL), 0);
  24.141 -#else
  24.142 -		MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
  24.143 -					pfn_pte_ma(rx->addr >> PAGE_SHIFT, 
  24.144 -						   PAGE_KERNEL), 0);
  24.145 -#endif
  24.146  		mcl++;
  24.147  
  24.148 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.149  		phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
  24.150 -#else
  24.151 -		phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
  24.152 -			rx->addr >> PAGE_SHIFT;
  24.153 -#endif 
  24.154 -
  24.155  
  24.156  		__skb_queue_tail(&rxq, skb);
  24.157  	}
  24.158 @@ -773,16 +734,12 @@ static void network_connect(struct net_d
  24.159  			tx = &np->tx->ring[requeue_idx++].req;
  24.160  
  24.161  			tx->id   = i;
  24.162 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.163  			gnttab_grant_foreign_access_ref(
  24.164  				np->grant_tx_ref[i], np->backend_id, 
  24.165  				virt_to_mfn(np->tx_skbs[i]->data),
  24.166  				GNTMAP_readonly); 
  24.167 -			tx->addr = np->grant_tx_ref[i] << PAGE_SHIFT; 
  24.168 -#else
  24.169 -			tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
  24.170 -#endif
  24.171 -			tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
  24.172 +			tx->gref = np->grant_tx_ref[i];
  24.173 +			tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
  24.174  			tx->size = skb->len;
  24.175  
  24.176  			np->stats.tx_bytes += skb->len;
  24.177 @@ -795,12 +752,10 @@ static void network_connect(struct net_d
  24.178  	/* Rebuild the RX buffer freelist and the RX ring itself. */
  24.179  	for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
  24.180  		if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
  24.181 -#ifdef CONFIG_XEN_NETDEV_GRANT 
  24.182  			gnttab_grant_foreign_transfer_ref(
  24.183  				np->grant_rx_ref[i], np->backend_id);
  24.184  			np->rx->ring[requeue_idx].req.gref =
  24.185  				np->grant_rx_ref[i];
  24.186 -#endif
  24.187  			np->rx->ring[requeue_idx].req.id = i;
  24.188  			requeue_idx++; 
  24.189  		}
  24.190 @@ -862,11 +817,9 @@ connect_device(struct net_private *np, u
  24.191  
  24.192  static void netif_uninit(struct net_device *dev)
  24.193  {
  24.194 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.195  	struct net_private *np = netdev_priv(dev);
  24.196  	gnttab_free_grant_references(np->gref_tx_head);
  24.197  	gnttab_free_grant_references(np->gref_rx_head);
  24.198 -#endif
  24.199  }
  24.200  
  24.201  static struct ethtool_ops network_ethtool_ops =
  24.202 @@ -911,19 +864,14 @@ static int create_netdev(int handle, str
  24.203  	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
  24.204  	for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
  24.205  		np->tx_skbs[i] = (void *)((unsigned long) i+1);
  24.206 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.207  		np->grant_tx_ref[i] = GRANT_INVALID_REF;
  24.208 -#endif
  24.209  	}
  24.210  
  24.211  	for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
  24.212  		np->rx_skbs[i] = (void *)((unsigned long) i+1);
  24.213 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.214  		np->grant_rx_ref[i] = GRANT_INVALID_REF;
  24.215 -#endif
  24.216  	}
  24.217  
  24.218 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.219  	/* A grant for every tx ring slot */
  24.220  	if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
  24.221  					  &np->gref_tx_head) < 0) {
  24.222 @@ -937,7 +885,6 @@ static int create_netdev(int handle, str
  24.223  		gnttab_free_grant_references(np->gref_tx_head);
  24.224  		goto exit;
  24.225  	}
  24.226 -#endif
  24.227  
  24.228  	netdev->open            = network_open;
  24.229  	netdev->hard_start_xmit = network_start_xmit;
  24.230 @@ -971,10 +918,8 @@ static int create_netdev(int handle, str
  24.231  	return err;
  24.232  
  24.233   exit_free_grefs:
  24.234 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.235  	gnttab_free_grant_references(np->gref_tx_head);
  24.236  	gnttab_free_grant_references(np->gref_rx_head);
  24.237 -#endif
  24.238  	goto exit;
  24.239  }
  24.240  
  24.241 @@ -1024,10 +969,8 @@ static int setup_device(struct xenbus_de
  24.242  	evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
  24.243  	int err;
  24.244  
  24.245 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.246  	info->tx_ring_ref = GRANT_INVALID_REF;
  24.247  	info->rx_ring_ref = GRANT_INVALID_REF;
  24.248 -#endif
  24.249  
  24.250  	info->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
  24.251  	if (info->tx == 0) {
  24.252 @@ -1045,7 +988,6 @@ static int setup_device(struct xenbus_de
  24.253  	memset(info->rx, 0, PAGE_SIZE);
  24.254  	info->backend_state = BEST_DISCONNECTED;
  24.255  
  24.256 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.257  	err = gnttab_grant_foreign_access(info->backend_id,
  24.258  					  virt_to_mfn(info->tx), 0);
  24.259  	if (err < 0) {
  24.260 @@ -1062,11 +1004,6 @@ static int setup_device(struct xenbus_de
  24.261  	}
  24.262  	info->rx_ring_ref = err;
  24.263  
  24.264 -#else
  24.265 -	info->tx_ring_ref = virt_to_mfn(info->tx);
  24.266 -	info->rx_ring_ref = virt_to_mfn(info->rx);
  24.267 -#endif
  24.268 -
  24.269  	op.u.alloc_unbound.dom = info->backend_id;
  24.270  	err = HYPERVISOR_event_channel_op(&op);
  24.271  	if (err) {
  24.272 @@ -1084,7 +1021,6 @@ static int setup_device(struct xenbus_de
  24.273  		free_page((unsigned long)info->rx);
  24.274  	info->rx = 0;
  24.275  
  24.276 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.277  	if (info->tx_ring_ref != GRANT_INVALID_REF)
  24.278  		gnttab_end_foreign_access(info->tx_ring_ref, 0);
  24.279  	info->tx_ring_ref = GRANT_INVALID_REF;
  24.280 @@ -1092,7 +1028,6 @@ static int setup_device(struct xenbus_de
  24.281  	if (info->rx_ring_ref != GRANT_INVALID_REF)
  24.282  		gnttab_end_foreign_access(info->rx_ring_ref, 0);
  24.283  	info->rx_ring_ref = GRANT_INVALID_REF;
  24.284 -#endif
  24.285  
  24.286  	return err;
  24.287  }
  24.288 @@ -1106,7 +1041,6 @@ static void netif_free(struct netfront_i
  24.289  		free_page((unsigned long)info->rx);
  24.290  	info->rx = 0;
  24.291  
  24.292 -#ifdef CONFIG_XEN_NETDEV_GRANT
  24.293  	if (info->tx_ring_ref != GRANT_INVALID_REF)
  24.294  		gnttab_end_foreign_access(info->tx_ring_ref, 0);
  24.295  	info->tx_ring_ref = GRANT_INVALID_REF;
  24.296 @@ -1114,7 +1048,6 @@ static void netif_free(struct netfront_i
  24.297  	if (info->rx_ring_ref != GRANT_INVALID_REF)
  24.298  		gnttab_end_foreign_access(info->rx_ring_ref, 0);
  24.299  	info->rx_ring_ref = GRANT_INVALID_REF;
  24.300 -#endif
  24.301  
  24.302  	unbind_evtchn_from_irqhandler(info->evtchn, info->netdev);
  24.303  	info->evtchn = 0;
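
The frontend now runs the same grant lifecycle unconditionally on both rings: claim a reference from the pool allocated in create_netdev(), grant the page to the backend, publish the reference in the ring, and recycle it only once the backend has let go. The sketch below condenses the transmit path; the helper names are hypothetical, and only gnttab calls that already appear in this file are used.

/* Condensed from network_start_xmit() and network_tx_buf_gc() above.
 * 'np', 'tx', 'skb' and 'id' are as in those functions. */
static void tx_grant_publish(struct net_private *np, netif_tx_request_t *tx,
			     struct sk_buff *skb, u16 id)
{
	grant_ref_t ref = gnttab_claim_grant_reference(&np->gref_tx_head);

	gnttab_grant_foreign_access_ref(ref, np->backend_id,
					virt_to_mfn(skb->data),
					GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;	/* publish in the ring */
}

static void tx_grant_recycle(struct net_private *np, u16 id)
{
	/* Backend must have unmapped the page before the ref is reused. */
	if (gnttab_query_foreign_access(np->grant_tx_ref[id]) != 0)
		return;
	gnttab_release_grant_reference(&np->gref_tx_head,
				       np->grant_tx_ref[id]);
	np->grant_tx_ref[id] = GRANT_INVALID_REF;
}
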
    25.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Thu Sep 22 16:05:44 2005 +0100
    25.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Thu Sep 22 16:12:14 2005 +0100
    25.3 @@ -41,232 +41,253 @@ static struct proc_dir_entry *privcmd_in
    25.4  static int privcmd_ioctl(struct inode *inode, struct file *file,
    25.5                           unsigned int cmd, unsigned long data)
    25.6  {
    25.7 -    int ret = -ENOSYS;
    25.8 +	int ret = -ENOSYS;
    25.9  
   25.10 -    switch ( cmd )
   25.11 -    {
   25.12 -    case IOCTL_PRIVCMD_HYPERCALL:
   25.13 -    {
   25.14 -        privcmd_hypercall_t hypercall;
   25.15 +	switch (cmd) {
   25.16 +	case IOCTL_PRIVCMD_HYPERCALL: {
   25.17 +		privcmd_hypercall_t hypercall;
   25.18    
   25.19 -        if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
   25.20 -            return -EFAULT;
   25.21 +		if (copy_from_user(&hypercall, (void *)data,
   25.22 +				   sizeof(hypercall)))
   25.23 +			return -EFAULT;
   25.24  
   25.25  #if defined(__i386__)
   25.26 -        __asm__ __volatile__ (
   25.27 -            "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
   25.28 -            "movl  4(%%eax),%%ebx ;"
   25.29 -            "movl  8(%%eax),%%ecx ;"
   25.30 -            "movl 12(%%eax),%%edx ;"
   25.31 -            "movl 16(%%eax),%%esi ;"
   25.32 -            "movl 20(%%eax),%%edi ;"
   25.33 -            "movl   (%%eax),%%eax ;"
   25.34 -            TRAP_INSTR "; "
   25.35 -            "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
   25.36 -            : "=a" (ret) : "0" (&hypercall) : "memory" );
   25.37 +		__asm__ __volatile__ (
   25.38 +			"pushl %%ebx; pushl %%ecx; pushl %%edx; "
   25.39 +			"pushl %%esi; pushl %%edi; "
   25.40 +			"movl  4(%%eax),%%ebx ;"
   25.41 +			"movl  8(%%eax),%%ecx ;"
   25.42 +			"movl 12(%%eax),%%edx ;"
   25.43 +			"movl 16(%%eax),%%esi ;"
   25.44 +			"movl 20(%%eax),%%edi ;"
   25.45 +			"movl   (%%eax),%%eax ;"
   25.46 +			TRAP_INSTR "; "
   25.47 +			"popl %%edi; popl %%esi; popl %%edx; "
   25.48 +			"popl %%ecx; popl %%ebx"
   25.49 +			: "=a" (ret) : "0" (&hypercall) : "memory" );
   25.50  #elif defined (__x86_64__)
   25.51 -        {
   25.52 -            long ign1, ign2, ign3;
   25.53 -            __asm__ __volatile__ (
   25.54 -                "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
   25.55 -                : "=a" (ret), "=D" (ign1), "=S" (ign2), "=d" (ign3)
   25.56 -                : "0" ((unsigned long)hypercall.op), 
   25.57 -                "1" ((unsigned long)hypercall.arg[0]), 
   25.58 -                "2" ((unsigned long)hypercall.arg[1]),
   25.59 -                "3" ((unsigned long)hypercall.arg[2]), 
   25.60 -                "g" ((unsigned long)hypercall.arg[3]),
   25.61 -                "g" ((unsigned long)hypercall.arg[4])
   25.62 -                : "r11","rcx","r8","r10","memory");
   25.63 -        }
   25.64 +		{
   25.65 +			long ign1, ign2, ign3;
   25.66 +			__asm__ __volatile__ (
   25.67 +				"movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
   25.68 +				: "=a" (ret), "=D" (ign1),
   25.69 +				  "=S" (ign2), "=d" (ign3)
   25.70 +				: "0" ((unsigned long)hypercall.op), 
   25.71 +				"1" ((unsigned long)hypercall.arg[0]), 
   25.72 +				"2" ((unsigned long)hypercall.arg[1]),
   25.73 +				"3" ((unsigned long)hypercall.arg[2]), 
   25.74 +				"g" ((unsigned long)hypercall.arg[3]),
   25.75 +				"g" ((unsigned long)hypercall.arg[4])
   25.76 +				: "r11","rcx","r8","r10","memory");
   25.77 +		}
   25.78  #elif defined (__ia64__)
   25.79 -       __asm__ __volatile__ (
   25.80 -           ";; mov r14=%2; mov r15=%3; mov r16=%4; mov r17=%5; mov r18=%6;"
   25.81 -           "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
   25.82 -           : "=r" (ret)
   25.83 -           : "r" (hypercall.op),
   25.84 -             "r" (hypercall.arg[0]),
   25.85 -             "r" (hypercall.arg[1]),
   25.86 -             "r" (hypercall.arg[2]),
   25.87 -             "r" (hypercall.arg[3]),
   25.88 -             "r" (hypercall.arg[4])
   25.89 -           : "r14","r15","r16","r17","r18","r2","r8","memory");
   25.90 +		__asm__ __volatile__ (
   25.91 +			";; mov r14=%2; mov r15=%3; "
   25.92 +			"mov r16=%4; mov r17=%5; mov r18=%6;"
   25.93 +			"mov r2=%1; break 0x1000;; mov %0=r8 ;;"
   25.94 +			: "=r" (ret)
   25.95 +			: "r" (hypercall.op),
   25.96 +			"r" (hypercall.arg[0]),
   25.97 +			"r" (hypercall.arg[1]),
   25.98 +			"r" (hypercall.arg[2]),
   25.99 +			"r" (hypercall.arg[3]),
  25.100 +			"r" (hypercall.arg[4])
  25.101 +			: "r14","r15","r16","r17","r18","r2","r8","memory");
  25.102  #endif
  25.103 -    }
  25.104 -    break;
  25.105 +	}
  25.106 +	break;
  25.107  
  25.108  #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
  25.109 -    case IOCTL_PRIVCMD_MMAP:
  25.110 -    {
  25.111 +	case IOCTL_PRIVCMD_MMAP: {
  25.112  #define PRIVCMD_MMAP_SZ 32
  25.113 -        privcmd_mmap_t mmapcmd;
  25.114 -        privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
  25.115 -        int i, rc;
  25.116 -
  25.117 -        if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
  25.118 -            return -EFAULT;
  25.119 -
  25.120 -        p = mmapcmd.entry;
  25.121 -
  25.122 -        for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
  25.123 -        {
  25.124 -            int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
  25.125 -                PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
  25.126 -
  25.127 -
  25.128 -            if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
  25.129 -                return -EFAULT;
  25.130 -     
  25.131 -            for ( j = 0; j < n; j++ )
  25.132 -            {
  25.133 -                struct vm_area_struct *vma = 
  25.134 -                    find_vma( current->mm, msg[j].va );
  25.135 -
  25.136 -                if ( !vma )
  25.137 -                    return -EINVAL;
  25.138 -
  25.139 -                if ( msg[j].va > PAGE_OFFSET )
  25.140 -                    return -EINVAL;
  25.141 -
  25.142 -                if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
  25.143 -                    return -EINVAL;
  25.144 -
  25.145 -                if ( (rc = direct_remap_pfn_range(vma,
  25.146 -                                                  msg[j].va&PAGE_MASK, 
  25.147 -                                                  msg[j].mfn, 
  25.148 -                                                  msg[j].npages<<PAGE_SHIFT, 
  25.149 -                                                  vma->vm_page_prot,
  25.150 -                                                  mmapcmd.dom)) < 0 )
  25.151 -                    return rc;
  25.152 -            }
  25.153 -        }
  25.154 -        ret = 0;
  25.155 -    }
  25.156 -    break;
  25.157 +		privcmd_mmap_t mmapcmd;
  25.158 +		privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
  25.159 +		int i, rc;
  25.160  
  25.161 -    case IOCTL_PRIVCMD_MMAPBATCH:
  25.162 -    {
  25.163 -        mmu_update_t u;
  25.164 -        privcmd_mmapbatch_t m;
  25.165 -        struct vm_area_struct *vma = NULL;
  25.166 -        unsigned long *p, addr;
  25.167 -        unsigned long mfn, ptep;
  25.168 -        int i;
  25.169 -
  25.170 -        if ( copy_from_user(&m, (void *)data, sizeof(m)) )
  25.171 -        { ret = -EFAULT; goto batch_err; }
  25.172 -
  25.173 -        vma = find_vma( current->mm, m.addr );
  25.174 -
  25.175 -        if ( !vma )
  25.176 -        { ret = -EINVAL; goto batch_err; }
  25.177 -
  25.178 -        if ( m.addr > PAGE_OFFSET )
  25.179 -        { ret = -EFAULT; goto batch_err; }
  25.180 -
  25.181 -        if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
  25.182 -        { ret = -EFAULT; goto batch_err; }
  25.183 +		if (copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)))
  25.184 +			return -EFAULT;
  25.185  
  25.186 -        p = m.arr;
  25.187 -        addr = m.addr;
  25.188 -        for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
  25.189 -        {
  25.190 -            if ( get_user(mfn, p) )
  25.191 -                return -EFAULT;
  25.192 -
  25.193 -            ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
  25.194 -            if (ret)
  25.195 -                goto batch_err;
  25.196 -
  25.197 -            u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
  25.198 -            u.ptr = ptep;
  25.199 +		p = mmapcmd.entry;
  25.200  
  25.201 -            if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
  25.202 -                put_user(0xF0000000 | mfn, p);
  25.203 -        }
  25.204 -
  25.205 -        ret = 0;
  25.206 -        break;
  25.207 +		for (i = 0; i < mmapcmd.num;
  25.208 +		     i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) {
  25.209 +			int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
  25.210 +				PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
  25.211  
  25.212 -    batch_err:
  25.213 -        printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n", 
  25.214 -               ret, vma, m.addr, m.num, m.arr,
  25.215 -               vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
  25.216 -        break;
  25.217 -    }
  25.218 -    break;
  25.219 +			if (copy_from_user(&msg, p,
  25.220 +					   n*sizeof(privcmd_mmap_entry_t)))
  25.221 +				return -EFAULT;
   25.222 +
  25.223 +			for (j = 0; j < n; j++) {
   25.224 +				struct vm_area_struct *vma =
   25.225 +					find_vma(current->mm, msg[j].va);
  25.226 +
  25.227 +				if (!vma)
  25.228 +					return -EINVAL;
  25.229 +
  25.230 +				if (msg[j].va > PAGE_OFFSET)
  25.231 +					return -EINVAL;
  25.232 +
  25.233 +				if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
   25.234 +				    > vma->vm_end)
  25.235 +					return -EINVAL;
  25.236 +
  25.237 +				if ((rc = direct_remap_pfn_range(
  25.238 +					vma,
  25.239 +					msg[j].va&PAGE_MASK, 
  25.240 +					msg[j].mfn, 
  25.241 +					msg[j].npages<<PAGE_SHIFT, 
  25.242 +					vma->vm_page_prot,
  25.243 +					mmapcmd.dom)) < 0)
  25.244 +					return rc;
  25.245 +			}
  25.246 +		}
  25.247 +		ret = 0;
  25.248 +	}
  25.249 +	break;
  25.250 +
  25.251 +	case IOCTL_PRIVCMD_MMAPBATCH: {
  25.252 +		mmu_update_t u;
  25.253 +		privcmd_mmapbatch_t m;
  25.254 +		struct vm_area_struct *vma = NULL;
  25.255 +		unsigned long *p, addr;
  25.256 +		unsigned long mfn, ptep;
  25.257 +		int i;
  25.258 +
  25.259 +		if (copy_from_user(&m, (void *)data, sizeof(m))) {
  25.260 +			ret = -EFAULT;
  25.261 +			goto batch_err;
  25.262 +		}
  25.263 +
   25.264 +		vma = find_vma(current->mm, m.addr);
  25.265 +		if (!vma) {
  25.266 +			ret = -EINVAL;
  25.267 +			goto batch_err;
  25.268 +		}
  25.269 +
  25.270 +		if (m.addr > PAGE_OFFSET) {
  25.271 +			ret = -EFAULT;
  25.272 +			goto batch_err;
  25.273 +		}
  25.274 +
  25.275 +		if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) {
  25.276 +			ret = -EFAULT;
  25.277 +			goto batch_err;
  25.278 +		}
  25.279 +
  25.280 +		p = m.arr;
  25.281 +		addr = m.addr;
  25.282 +		for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
  25.283 +			if (get_user(mfn, p))
  25.284 +				return -EFAULT;
  25.285 +
  25.286 +			ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
  25.287 +			if (ret)
  25.288 +				goto batch_err;
  25.289 +
  25.290 +			u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
  25.291 +			u.ptr = ptep;
  25.292 +
  25.293 +			if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
  25.294 +				put_user(0xF0000000 | mfn, p);
  25.295 +		}
  25.296 +
  25.297 +		ret = 0;
  25.298 +		break;
  25.299 +
  25.300 +	batch_err:
   25.301 +		printk(KERN_WARNING "batch_err ret=%d vma=%p addr=%lx "
   25.302 +		       "num=%d arr=%p %lx-%lx\n",
  25.303 +		       ret, vma, m.addr, m.num, m.arr,
  25.304 +		       vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
  25.305 +		break;
  25.306 +	}
  25.307 +	break;
  25.308  #endif
  25.309  
  25.310 -    case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
  25.311 -    {
  25.312 -        unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
  25.313 -        pgd_t *pgd = pgd_offset_k(m2pv);
  25.314 -        pud_t *pud = pud_offset(pgd, m2pv);
  25.315 -        pmd_t *pmd = pmd_offset(pud, m2pv);
  25.316 -        unsigned long m2p_start_mfn = (*(unsigned long *)pmd) >> PAGE_SHIFT; 
  25.317 -        ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0;
  25.318 -    }
  25.319 -    break;
  25.320 -
  25.321 -    case IOCTL_PRIVCMD_INITDOMAIN_STORE:
  25.322 -    {
  25.323 -        extern int do_xenbus_probe(void*);
  25.324 -        unsigned long page;
  25.325 -
  25.326 -        if (xen_start_info->store_evtchn != 0) {
  25.327 -            ret = xen_start_info->store_mfn;
  25.328 -            break;
  25.329 -        }
  25.330 +	case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN: {
  25.331 +		unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
  25.332 +		pgd_t *pgd = pgd_offset_k(m2pv);
  25.333 +		pud_t *pud = pud_offset(pgd, m2pv);
  25.334 +		pmd_t *pmd = pmd_offset(pud, m2pv);
  25.335 +		unsigned long m2p_start_mfn =
  25.336 +			(*(unsigned long *)pmd) >> PAGE_SHIFT; 
  25.337 +		ret = put_user(m2p_start_mfn, (unsigned long *)data) ?
  25.338 +			-EFAULT: 0;
  25.339 +	}
  25.340 +	break;
  25.341  
  25.342 -        /* Allocate page. */
  25.343 -        page = get_zeroed_page(GFP_KERNEL);
  25.344 -        if (!page) {
  25.345 -            ret = -ENOMEM;
  25.346 -            break;
  25.347 -        }
  25.348 -
  25.349 -        /* We don't refcnt properly, so set reserved on page.
  25.350 -         * (this allocation is permanent) */
  25.351 -        SetPageReserved(virt_to_page(page));
  25.352 +	case IOCTL_PRIVCMD_INITDOMAIN_STORE: {
  25.353 +		extern int do_xenbus_probe(void*);
  25.354 +		unsigned long page;
  25.355  
  25.356 -        /* Initial connect. Setup channel and page. */
  25.357 -        xen_start_info->store_evtchn = data;
  25.358 -        xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >>
  25.359 -                                              PAGE_SHIFT);
  25.360 -        ret = xen_start_info->store_mfn;
  25.361 +		if (xen_start_info->store_evtchn != 0) {
  25.362 +			ret = xen_start_info->store_mfn;
  25.363 +			break;
  25.364 +		}
  25.365  
  25.366 -        /* We'll return then this will wait for daemon to answer */
  25.367 -        kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
  25.368 -    }
  25.369 -    break;
  25.370 +		/* Allocate page. */
  25.371 +		page = get_zeroed_page(GFP_KERNEL);
  25.372 +		if (!page) {
  25.373 +			ret = -ENOMEM;
  25.374 +			break;
  25.375 +		}
  25.376  
  25.377 -    default:
  25.378 -        ret = -EINVAL;
  25.379 -        break;
  25.380 -    }
  25.381 -    return ret;
  25.382 +		/* We don't refcnt properly, so set reserved on page.
  25.383 +		 * (this allocation is permanent) */
  25.384 +		SetPageReserved(virt_to_page(page));
  25.385 +
  25.386 +		/* Initial connect. Setup channel and page. */
  25.387 +		xen_start_info->store_evtchn = data;
  25.388 +		xen_start_info->store_mfn =
  25.389 +			pfn_to_mfn(virt_to_phys((void *)page) >>
  25.390 +				   PAGE_SHIFT);
  25.391 +		ret = xen_start_info->store_mfn;
  25.392 +
   25.393 +		/* We return now; the kthread waits for the daemon to answer. */
  25.394 +		kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
  25.395 +	}
  25.396 +	break;
  25.397 +
  25.398 +	default:
  25.399 +		ret = -EINVAL;
  25.400 +		break;
  25.401 +	}
  25.402 +
  25.403 +	return ret;
  25.404  }
  25.405  
  25.406  static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
  25.407  {
  25.408 -    /* DONTCOPY is essential for Xen as copy_page_range is broken. */
  25.409 -    vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
  25.410 +	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
  25.411 +	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
  25.412  
  25.413 -    return 0;
  25.414 +	return 0;
  25.415  }
  25.416  
  25.417  static struct file_operations privcmd_file_ops = {
  25.418 -    .ioctl = privcmd_ioctl,
  25.419 -    .mmap  = privcmd_mmap,
  25.420 +	.ioctl = privcmd_ioctl,
  25.421 +	.mmap  = privcmd_mmap,
  25.422  };
  25.423  
  25.424  
  25.425  static int __init privcmd_init(void)
  25.426  {
  25.427 -    privcmd_intf = create_xen_proc_entry("privcmd", 0400);
  25.428 -    if ( privcmd_intf != NULL )
  25.429 -        privcmd_intf->proc_fops = &privcmd_file_ops;
  25.430 +	privcmd_intf = create_xen_proc_entry("privcmd", 0400);
  25.431 +	if (privcmd_intf != NULL)
  25.432 +		privcmd_intf->proc_fops = &privcmd_file_ops;
  25.433  
  25.434 -    return 0;
  25.435 +	return 0;
  25.436  }
  25.437  
  25.438  __initcall(privcmd_init);
  25.439 +
  25.440 +/*
  25.441 + * Local variables:
  25.442 + *  c-file-style: "linux"
  25.443 + *  indent-tabs-mode: t
  25.444 + *  c-indent-level: 8
  25.445 + *  c-basic-offset: 8
  25.446 + *  tab-width: 8
  25.447 + * End:
  25.448 + */
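
From user space, the ioctl handler above is reached through the node created in privcmd_init(); a caller fills a privcmd_hypercall_t (an op plus up to five arguments, exactly the slots the per-arch asm marshals) and issues IOCTL_PRIVCMD_HYPERCALL. A minimal sketch, assuming the node surfaces as /proc/xen/privcmd and that the structure and ioctl definitions come from the exported privcmd header:

/* Hypothetical caller; privcmd_hypercall_t and IOCTL_PRIVCMD_HYPERCALL
 * are assumed to come from the exported privcmd header, and the path
 * from create_xen_proc_entry("privcmd", 0400) above. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

long do_hypercall(unsigned long op, unsigned long arg0)
{
	privcmd_hypercall_t call = { .op = op, .arg = { arg0 } };
	long ret;
	int fd = open("/proc/xen/privcmd", O_RDWR);

	if (fd < 0)
		return -1;
	/* The driver copies the struct in and traps into Xen. */
	ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
	close(fd);
	return ret;
}
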
    26.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu Sep 22 16:05:44 2005 +0100
    26.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu Sep 22 16:12:14 2005 +0100
    26.3 @@ -84,3 +84,13 @@ extern int num_frontends;
    26.4  #define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE))
    26.5  
    26.6  #endif /* __TPMIF__BACKEND__COMMON_H__ */
    26.7 +
    26.8 +/*
    26.9 + * Local variables:
   26.10 + *  c-file-style: "linux"
   26.11 + *  indent-tabs-mode: t
   26.12 + *  c-indent-level: 8
   26.13 + *  c-basic-offset: 8
   26.14 + *  tab-width: 8
   26.15 + * End:
   26.16 + */
    27.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu Sep 22 16:05:44 2005 +0100
    27.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu Sep 22 16:12:14 2005 +0100
    27.3 @@ -566,7 +566,7 @@ vtpm_op_read(struct file *file,
    27.4  				 * the more time we give the TPM to process the request.
    27.5  				 */
    27.6  				mod_timer(&pak->processing_timer,
    27.7 -				          jiffies + (num_frontends * 10 * HZ));
    27.8 +				          jiffies + (num_frontends * 60 * HZ));
    27.9  				dataex.copied_so_far = 0;
   27.10  			}
   27.11  		}
   27.12 @@ -850,7 +850,7 @@ static int vtpm_queue_packet(struct pack
   27.13  		write_lock_irqsave(&dataex.pak_lock, flags);
   27.14  		list_add_tail(&pak->next, &dataex.pending_pak);
   27.15  		/* give the TPM some time to pick up the request */
   27.16 -		mod_timer(&pak->processing_timer, jiffies + (10 * HZ));
   27.17 +		mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
   27.18  		write_unlock_irqrestore(&dataex.pak_lock,
   27.19  		                        flags);
   27.20  
   27.21 @@ -1075,3 +1075,13 @@ tpmback_init(void)
   27.22  }
   27.23  
   27.24  __initcall(tpmback_init);
   27.25 +
   27.26 +/*
   27.27 + * Local variables:
   27.28 + *  c-file-style: "linux"
   27.29 + *  indent-tabs-mode: t
   27.30 + *  c-indent-level: 8
   27.31 + *  c-basic-offset: 8
   27.32 + *  tab-width: 8
   27.33 + * End:
   27.34 + */
    28.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Thu Sep 22 16:05:44 2005 +0100
    28.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Thu Sep 22 16:12:14 2005 +0100
    28.3 @@ -268,3 +268,13 @@ void tpmif_xenbus_init(void)
    28.4  {
    28.5  	xenbus_register_backend(&tpmback);
    28.6  }
    28.7 +
    28.8 +/*
    28.9 + * Local variables:
   28.10 + *  c-file-style: "linux"
   28.11 + *  indent-tabs-mode: t
   28.12 + *  c-indent-level: 8
   28.13 + *  c-basic-offset: 8
   28.14 + *  tab-width: 8
   28.15 + * End:
   28.16 + */
    29.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu Sep 22 16:05:44 2005 +0100
    29.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu Sep 22 16:12:14 2005 +0100
    29.3 @@ -741,3 +741,13 @@ tpmif_init(void)
    29.4  }
    29.5  
    29.6  __initcall(tpmif_init);
    29.7 +
    29.8 +/*
    29.9 + * Local variables:
   29.10 + *  c-file-style: "linux"
   29.11 + *  indent-tabs-mode: t
   29.12 + *  c-indent-level: 8
   29.13 + *  c-basic-offset: 8
   29.14 + *  tab-width: 8
   29.15 + * End:
   29.16 + */
    30.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu Sep 22 16:05:44 2005 +0100
    30.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu Sep 22 16:12:14 2005 +0100
    30.3 @@ -38,3 +38,13 @@ struct tx_buffer
    30.4  };
    30.5  
    30.6  #endif
    30.7 +
    30.8 +/*
    30.9 + * Local variables:
   30.10 + *  c-file-style: "linux"
   30.11 + *  indent-tabs-mode: t
   30.12 + *  c-indent-level: 8
   30.13 + *  c-basic-offset: 8
   30.14 + *  tab-width: 8
   30.15 + * End:
   30.16 + */
    31.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/common.h	Thu Sep 22 16:05:44 2005 +0100
    31.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.3 @@ -1,84 +0,0 @@
    31.4 -
    31.5 -#ifndef __USBIF__BACKEND__COMMON_H__
    31.6 -#define __USBIF__BACKEND__COMMON_H__
    31.7 -
    31.8 -#include <linux/config.h>
    31.9 -#include <linux/version.h>
   31.10 -#include <linux/module.h>
   31.11 -#include <linux/rbtree.h>
   31.12 -#include <linux/interrupt.h>
   31.13 -#include <linux/slab.h>
   31.14 -#include <linux/blkdev.h>
   31.15 -#include <asm/io.h>
   31.16 -#include <asm/setup.h>
   31.17 -#include <asm/pgalloc.h>
   31.18 -#include <asm/hypervisor.h>
   31.19 -#include <asm-xen/driver_util.h>
   31.20 -#include <asm-xen/xen-public/io/usbif.h>
   31.21 -
   31.22 -#if 0
   31.23 -#define ASSERT(_p) \
   31.24 -    if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
   31.25 -    __LINE__, __FILE__); *(int*)0=0; }
   31.26 -#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
   31.27 -                           __FILE__ , __LINE__ , ## _a )
   31.28 -#else
   31.29 -#define ASSERT(_p) ((void)0)
   31.30 -#define DPRINTK(_f, _a...) ((void)0)
   31.31 -#endif
   31.32 -
   31.33 -typedef struct usbif_priv_st usbif_priv_t;
   31.34 -
   31.35 -struct usbif_priv_st {
   31.36 -    /* Unique identifier for this interface. */
   31.37 -    domid_t          domid;
   31.38 -    unsigned int     handle;
   31.39 -    /* Physical parameters of the comms window. */
   31.40 -    unsigned long    shmem_frame;
   31.41 -    unsigned int     evtchn;
   31.42 -    /* Comms Information */
   31.43 -    usbif_back_ring_t usb_ring;
   31.44 -    struct vm_struct *usb_ring_area;
   31.45 -    /* Private fields. */
   31.46 -    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
   31.47 -    /*
   31.48 -     * DISCONNECT response is deferred until pending requests are ack'ed.
   31.49 -     * We therefore need to store the id from the original request.
   31.50 -     */
   31.51 -    u8                   disconnect_rspid;
   31.52 -    usbif_priv_t        *hash_next;
   31.53 -    struct list_head     usbif_list;
   31.54 -    spinlock_t           usb_ring_lock;
   31.55 -    atomic_t             refcnt;
   31.56 -
   31.57 -    struct work_struct work;
   31.58 -};
   31.59 -
   31.60 -void usbif_create(usbif_be_create_t *create);
   31.61 -void usbif_destroy(usbif_be_destroy_t *destroy);
   31.62 -void usbif_connect(usbif_be_connect_t *connect);
   31.63 -int  usbif_disconnect(usbif_be_disconnect_t *disconnect, u8 rsp_id);
   31.64 -void usbif_disconnect_complete(usbif_priv_t *up);
   31.65 -
   31.66 -void usbif_release_port(usbif_be_release_port_t *msg);
   31.67 -int usbif_claim_port(usbif_be_claim_port_t *msg);
   31.68 -void usbif_release_ports(usbif_priv_t *up);
   31.69 -
   31.70 -usbif_priv_t *usbif_find(domid_t domid);
   31.71 -#define usbif_get(_b) (atomic_inc(&(_b)->refcnt))
   31.72 -#define usbif_put(_b)                             \
   31.73 -    do {                                          \
   31.74 -        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
   31.75 -            usbif_disconnect_complete(_b);        \
   31.76 -    } while (0)
   31.77 -
   31.78 -
   31.79 -void usbif_interface_init(void);
   31.80 -void usbif_ctrlif_init(void);
   31.81 -
   31.82 -void usbif_deschedule(usbif_priv_t *up);
   31.83 -void remove_from_usbif_list(usbif_priv_t *up);
   31.84 -
   31.85 -irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs);
   31.86 -
   31.87 -#endif /* __USBIF__BACKEND__COMMON_H__ */
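
The usbif_get()/usbif_put() pair above implements deferred teardown: each in-flight request holds a reference, and the deferred DISCONNECT response is only sent by whoever drops the last one. A self-contained user-space model of the same pattern, using C11 atomics; all names (obj, obj_put, teardown) are illustrative.

    #include <stdatomic.h>
    #include <stdio.h>

    struct obj { atomic_int refcnt; };

    static void teardown(struct obj *o)
    {
        /* In usbback this is usbif_disconnect_complete(): free the ring
         * area and send the deferred DISCONNECT response. */
        printf("last reference dropped, completing disconnect\n");
    }

    static void obj_get(struct obj *o) { atomic_fetch_add(&o->refcnt, 1); }

    static void obj_put(struct obj *o)
    {
        /* atomic_dec_and_test() equivalent: exactly one caller sees the
         * count hit zero and runs the teardown. */
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
            teardown(o);
    }

    int main(void)
    {
        struct obj o = { 2 };      /* the connection plus one in-flight request */
        obj_get(&o);               /* another request is issued... */
        obj_put(&o);               /* ...and completes */
        obj_put(&o);               /* first request's reference */
        obj_put(&o);               /* disconnect drops the last one */
        return 0;
    }
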
    32.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/control.c	Thu Sep 22 16:05:44 2005 +0100
    32.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.3 @@ -1,61 +0,0 @@
    32.4 -/******************************************************************************
    32.5 - * arch/xen/drivers/usbif/backend/control.c
    32.6 - * 
    32.7 - * Routines for interfacing with the control plane.
    32.8 - * 
    32.9 - * Copyright (c) 2004, Keir Fraser
   32.10 - */
   32.11 -
   32.12 -#include "common.h"
   32.13 -
   32.14 -static void usbif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
   32.15 -{
   32.16 -    DPRINTK("Received usbif backend message, subtype=%d\n", msg->subtype);
   32.17 -    
   32.18 -    switch ( msg->subtype )
   32.19 -    {
   32.20 -    case CMSG_USBIF_BE_CREATE:
   32.21 -        usbif_create((usbif_be_create_t *)&msg->msg[0]);
   32.22 -        break;        
   32.23 -    case CMSG_USBIF_BE_DESTROY:
   32.24 -        usbif_destroy((usbif_be_destroy_t *)&msg->msg[0]);
   32.25 -        break;        
   32.26 -    case CMSG_USBIF_BE_CONNECT:
   32.27 -        usbif_connect((usbif_be_connect_t *)&msg->msg[0]);
   32.28 -        break;        
   32.29 -    case CMSG_USBIF_BE_DISCONNECT:
   32.30 -        if ( !usbif_disconnect((usbif_be_disconnect_t *)&msg->msg[0],msg->id) )
   32.31 -            return; /* Sending the response is deferred until later. */
   32.32 -        break;        
   32.33 -    case CMSG_USBIF_BE_CLAIM_PORT:
    32.34 -        usbif_claim_port((usbif_be_claim_port_t *)&msg->msg[0]);
   32.35 -        break;
   32.36 -    case CMSG_USBIF_BE_RELEASE_PORT:
   32.37 -        usbif_release_port((usbif_be_release_port_t *)&msg->msg[0]);
   32.38 -        break;
   32.39 -    default:
   32.40 -        DPRINTK("Parse error while reading message subtype %d, len %d\n",
   32.41 -                msg->subtype, msg->length);
   32.42 -        msg->length = 0;
   32.43 -        break;
   32.44 -    }
   32.45 -
   32.46 -    ctrl_if_send_response(msg);
   32.47 -}
   32.48 -
   32.49 -void usbif_ctrlif_init(void)
   32.50 -{
   32.51 -    ctrl_msg_t                       cmsg;
   32.52 -    usbif_be_driver_status_changed_t st;
   32.53 -
   32.54 -    (void)ctrl_if_register_receiver(CMSG_USBIF_BE, usbif_ctrlif_rx, 
   32.55 -                                    CALLBACK_IN_BLOCKING_CONTEXT);
   32.56 -
   32.57 -    /* Send a driver-UP notification to the domain controller. */
   32.58 -    cmsg.type      = CMSG_USBIF_BE;
   32.59 -    cmsg.subtype   = CMSG_USBIF_BE_DRIVER_STATUS_CHANGED;
   32.60 -    cmsg.length    = sizeof(usbif_be_driver_status_changed_t);
   32.61 -    st.status      = USBIF_DRIVER_STATUS_UP;
   32.62 -    memcpy(cmsg.msg, &st, sizeof(st));
   32.63 -    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
   32.64 -}
    33.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/interface.c	Thu Sep 22 16:05:44 2005 +0100
    33.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.3 @@ -1,241 +0,0 @@
    33.4 -/******************************************************************************
    33.5 - * arch/xen/drivers/usbif/backend/interface.c
    33.6 - * 
    33.7 - * USB device interface management.
    33.8 - * 
    33.9 - * by Mark Williamson, Copyright (c) 2004
   33.10 - */
   33.11 -
   33.12 -#include "common.h"
   33.13 -
   33.14 -#define USBIF_HASHSZ 1024
   33.15 -#define USBIF_HASH(_d) (((int)(_d))&(USBIF_HASHSZ-1))
   33.16 -
   33.17 -static kmem_cache_t      *usbif_priv_cachep;
   33.18 -static usbif_priv_t      *usbif_priv_hash[USBIF_HASHSZ];
   33.19 -
   33.20 -usbif_priv_t *usbif_find(domid_t domid)
   33.21 -{
   33.22 -    usbif_priv_t *up = usbif_priv_hash[USBIF_HASH(domid)];
   33.23 -    while ( (up != NULL ) && ( up->domid != domid ) )
   33.24 -        up = up->hash_next;
   33.25 -    return up;
   33.26 -}
   33.27 -
   33.28 -static void __usbif_disconnect_complete(void *arg)
   33.29 -{
   33.30 -    usbif_priv_t         *usbif = (usbif_priv_t *)arg;
   33.31 -    ctrl_msg_t            cmsg;
   33.32 -    usbif_be_disconnect_t disc;
   33.33 -
   33.34 -    /*
   33.35 -     * These can't be done in usbif_disconnect() because at that point there
   33.36 -     * may be outstanding requests at the device whose asynchronous responses
   33.37 -     * must still be notified to the remote driver.
   33.38 -     */
   33.39 -    free_vm_area(usbif->usb_ring_area);
   33.40 -
   33.41 -    /* Construct the deferred response message. */
   33.42 -    cmsg.type         = CMSG_USBIF_BE;
   33.43 -    cmsg.subtype      = CMSG_USBIF_BE_DISCONNECT;
   33.44 -    cmsg.id           = usbif->disconnect_rspid;
   33.45 -    cmsg.length       = sizeof(usbif_be_disconnect_t);
   33.46 -    disc.domid        = usbif->domid;
   33.47 -    disc.status       = USBIF_BE_STATUS_OKAY;
   33.48 -    memcpy(cmsg.msg, &disc, sizeof(disc));
   33.49 -
   33.50 -    /*
   33.51 -     * Make sure message is constructed /before/ status change, because
   33.52 -     * after the status change the 'usbif' structure could be deallocated at
   33.53 -     * any time. Also make sure we send the response /after/ status change,
   33.54 -     * as otherwise a subsequent CONNECT request could spuriously fail if
   33.55 -     * another CPU doesn't see the status change yet.
   33.56 -     */
   33.57 -    mb();
   33.58 -    if ( usbif->status != DISCONNECTING )
   33.59 -        BUG();
   33.60 -    usbif->status = DISCONNECTED;
   33.61 -    mb();
   33.62 -
   33.63 -    /* Send the successful response. */
   33.64 -    ctrl_if_send_response(&cmsg);
   33.65 -}
   33.66 -
   33.67 -void usbif_disconnect_complete(usbif_priv_t *up)
   33.68 -{
   33.69 -    INIT_WORK(&up->work, __usbif_disconnect_complete, (void *)up);
   33.70 -    schedule_work(&up->work);
   33.71 -}
   33.72 -
   33.73 -void usbif_create(usbif_be_create_t *create)
   33.74 -{
   33.75 -    domid_t       domid  = create->domid;
   33.76 -    usbif_priv_t **pup, *up;
   33.77 -
   33.78 -    if ( (up = kmem_cache_alloc(usbif_priv_cachep, GFP_KERNEL)) == NULL )
   33.79 -    {
   33.80 -        DPRINTK("Could not create usbif: out of memory\n");
   33.81 -        create->status = USBIF_BE_STATUS_OUT_OF_MEMORY;
   33.82 -        return;
   33.83 -    }
   33.84 -
   33.85 -    memset(up, 0, sizeof(*up));
   33.86 -    up->domid  = domid;
   33.87 -    up->status = DISCONNECTED;
   33.88 -    spin_lock_init(&up->usb_ring_lock);
   33.89 -    atomic_set(&up->refcnt, 0);
   33.90 -
   33.91 -    pup = &usbif_priv_hash[USBIF_HASH(domid)];
   33.92 -    while ( *pup != NULL )
   33.93 -    {
   33.94 -        if ( (*pup)->domid == domid )
   33.95 -        {
   33.96 -            create->status = USBIF_BE_STATUS_INTERFACE_EXISTS;
   33.97 -            kmem_cache_free(usbif_priv_cachep, up);
   33.98 -            return;
   33.99 -        }
  33.100 -        pup = &(*pup)->hash_next;
  33.101 -    }
  33.102 -
  33.103 -    up->hash_next = *pup;
  33.104 -    *pup = up;
  33.105 -
  33.106 -    create->status = USBIF_BE_STATUS_OKAY;
  33.107 -}
  33.108 -
  33.109 -void usbif_destroy(usbif_be_destroy_t *destroy)
  33.110 -{
  33.111 -    domid_t       domid  = destroy->domid;
  33.112 -    usbif_priv_t  **pup, *up;
  33.113 -
  33.114 -    pup = &usbif_priv_hash[USBIF_HASH(domid)];
  33.115 -    while ( (up = *pup) != NULL )
  33.116 -    {
  33.117 -        if ( up->domid == domid )
  33.118 -        {
  33.119 -            if ( up->status != DISCONNECTED )
  33.120 -                goto still_connected;
  33.121 -            goto destroy;
  33.122 -        }
  33.123 -        pup = &up->hash_next;
  33.124 -    }
  33.125 -
  33.126 -    destroy->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND;
  33.127 -    return;
  33.128 -
  33.129 - still_connected:
  33.130 -    destroy->status = USBIF_BE_STATUS_INTERFACE_CONNECTED;
  33.131 -    return;
  33.132 -
  33.133 - destroy:
  33.134 -    *pup = up->hash_next;
  33.135 -    usbif_release_ports(up);
  33.136 -    kmem_cache_free(usbif_priv_cachep, up);
  33.137 -    destroy->status = USBIF_BE_STATUS_OKAY;
  33.138 -}
  33.139 -
  33.140 -void usbif_connect(usbif_be_connect_t *connect)
  33.141 -{
  33.142 -    domid_t       domid  = connect->domid;
  33.143 -    unsigned int  evtchn = connect->evtchn;
  33.144 -    unsigned long shmem_frame = connect->shmem_frame;
  33.145 -    pgprot_t      prot;
  33.146 -    int           error;
  33.147 -    usbif_priv_t *up;
  33.148 -    usbif_sring_t *sring;
  33.149 -
  33.150 -    up = usbif_find(domid);
  33.151 -    if ( unlikely(up == NULL) )
  33.152 -    {
  33.153 -        DPRINTK("usbif_connect attempted for non-existent usbif (%u)\n", 
  33.154 -                connect->domid); 
  33.155 -        connect->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND;
  33.156 -        return;
  33.157 -    }
  33.158 -
  33.159 -    if ( (up->usb_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
  33.160 -    {
  33.161 -        connect->status = USBIF_BE_STATUS_OUT_OF_MEMORY;
  33.162 -        return;
  33.163 -    }
  33.164 -
  33.165 -    prot = __pgprot(_KERNPG_TABLE);
   33.166 -    error = direct_remap_pfn_range(&init_mm, (unsigned long)up->usb_ring_area->addr,
  33.167 -                                    shmem_frame, PAGE_SIZE,
  33.168 -                                    prot, domid);
  33.169 -    if ( error != 0 )
  33.170 -    {
  33.171 -        if ( error == -ENOMEM )
  33.172 -            connect->status = USBIF_BE_STATUS_OUT_OF_MEMORY;
  33.173 -        else if ( error == -EFAULT )
  33.174 -            connect->status = USBIF_BE_STATUS_MAPPING_ERROR;
  33.175 -        else
  33.176 -            connect->status = USBIF_BE_STATUS_ERROR;
  33.177 -        free_vm_area(up->usb_ring_area);
  33.178 -        return;
  33.179 -    }
  33.180 -
  33.181 -    if ( up->status != DISCONNECTED )
  33.182 -    {
  33.183 -        connect->status = USBIF_BE_STATUS_INTERFACE_CONNECTED;
  33.184 -        free_vm_area(up->usb_ring_area);
  33.185 -        return;
  33.186 -    }
  33.187 -
   33.188 -    sring = (usbif_sring_t *)up->usb_ring_area->addr;
  33.189 -    SHARED_RING_INIT(sring);
  33.190 -    BACK_RING_INIT(&up->usb_ring, sring, PAGE_SIZE);
  33.191 -
  33.192 -    up->evtchn        = evtchn;
  33.193 -    up->shmem_frame   = shmem_frame;
  33.194 -    up->status        = CONNECTED;
  33.195 -    usbif_get(up);
  33.196 -
  33.197 -    (void)bind_evtchn_to_irqhandler(
  33.198 -        evtchn, usbif_be_int, 0, "usbif-backend", up);
  33.199 -
  33.200 -    connect->status = USBIF_BE_STATUS_OKAY;
  33.201 -}
  33.202 -
  33.203 -/* Remove URBs for this interface before destroying it. */
  33.204 -void usbif_deschedule(usbif_priv_t *up)
  33.205 -{
  33.206 -    remove_from_usbif_list(up);
  33.207 -}
  33.208 -
  33.209 -int usbif_disconnect(usbif_be_disconnect_t *disconnect, u8 rsp_id)
  33.210 -{
  33.211 -    domid_t       domid  = disconnect->domid;
  33.212 -    usbif_priv_t *up;
  33.213 -
  33.214 -    up = usbif_find(domid);
  33.215 -    if ( unlikely(up == NULL) )
  33.216 -    {
  33.217 -        DPRINTK("usbif_disconnect attempted for non-existent usbif"
  33.218 -                " (%u)\n", disconnect->domid); 
  33.219 -        disconnect->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND;
  33.220 -        return 1; /* Caller will send response error message. */
  33.221 -    }
  33.222 -
  33.223 -    if ( up->status == CONNECTED )
  33.224 -    {
  33.225 -        up->status = DISCONNECTING;
  33.226 -        up->disconnect_rspid = rsp_id;
  33.227 -        wmb(); /* Let other CPUs see the status change. */
  33.228 -        unbind_evtchn_from_irqhandler(up->evtchn, up);
   33.229 -        usbif_deschedule(up);
  33.230 -        usbif_put(up);
  33.231 -        return 0; /* Caller should not send response message. */
  33.232 -    }
  33.233 -
  33.234 -    disconnect->status = USBIF_BE_STATUS_OKAY;
  33.235 -    return 1;
  33.236 -}
  33.237 -
  33.238 -void __init usbif_interface_init(void)
  33.239 -{
  33.240 -    usbif_priv_cachep = kmem_cache_create("usbif_priv_cache",
  33.241 -					  sizeof(usbif_priv_t), 
  33.242 -					  0, 0, NULL, NULL);
  33.243 -    memset(usbif_priv_hash, 0, sizeof(usbif_priv_hash));
  33.244 -}
    34.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c	Thu Sep 22 16:05:44 2005 +0100
    34.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.3 @@ -1,1068 +0,0 @@
    34.4 -/******************************************************************************
    34.5 - * arch/xen/drivers/usbif/backend/main.c
    34.6 - * 
    34.7 - * Backend for the Xen virtual USB driver - provides an abstraction of a
    34.8 - * USB host controller to the corresponding frontend driver.
    34.9 - *
   34.10 - * by Mark Williamson
   34.11 - * Copyright (c) 2004 Intel Research Cambridge
   34.12 - * Copyright (c) 2004, 2005 Mark Williamson
   34.13 - *
   34.14 - * Based on arch/xen/drivers/blkif/backend/main.c
   34.15 - * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
   34.16 - */
   34.17 -
   34.18 -#include "common.h"
   34.19 -
   34.20 -
   34.21 -#include <linux/list.h>
   34.22 -#include <linux/usb.h>
   34.23 -#include <linux/spinlock.h>
   34.24 -#include <linux/module.h>
   34.25 -#include <linux/tqueue.h>
   34.26 -
   34.27 -/*
   34.28 - * This is rather arbitrary.
   34.29 - */
   34.30 -#define MAX_PENDING_REQS 4
   34.31 -#define BATCH_PER_DOMAIN 1
   34.32 -
   34.33 -static unsigned long mmap_vstart;
   34.34 -
   34.35 -/* Needs to be sufficiently large that we can map the (large) buffers
   34.36 - * the USB mass storage driver wants. */
   34.37 -#define MMAP_PAGES_PER_REQUEST \
   34.38 -    (128)
   34.39 -#define MMAP_PAGES             \
   34.40 -    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
   34.41 -
   34.42 -#define MMAP_VADDR(_req,_seg)                        \
   34.43 -    (mmap_vstart +                                   \
   34.44 -     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
   34.45 -     ((_seg) * PAGE_SIZE))
   34.46 -
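
The macro above carves the reserved virtual range into fixed 128-page windows, one per in-flight request, with a request's segments laid out page by page inside its window. A standalone arithmetic check of that layout (the base address is illustrative, and the global mmap_vstart is passed explicitly here):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define MMAP_PAGES_PER_REQUEST 128UL
    #define MMAP_VADDR(vstart, req, seg) \
        ((vstart) + ((req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
         ((seg) * PAGE_SIZE))

    int main(void)
    {
        unsigned long mmap_vstart = 0xd0000000UL;  /* illustrative base only */

        /* Request 1, segment 2: (1 * 128 + 2) pages past the base. */
        printf("%#lx\n", MMAP_VADDR(mmap_vstart, 1UL, 2UL));
        return 0;
    }
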
   34.47 -
   34.48 -static spinlock_t owned_ports_lock;
   34.49 -LIST_HEAD(owned_ports);
   34.50 -
   34.51 -/* A list of these structures is used to track ownership of physical USB
   34.52 - * ports. */
   34.53 -typedef struct 
   34.54 -{
   34.55 -    usbif_priv_t     *usbif_priv;
   34.56 -    char             path[16];
   34.57 -    int               guest_port;
   34.58 -    int enabled;
   34.59 -    struct list_head  list;
   34.60 -    unsigned long guest_address; /* The USB device address that has been
   34.61 -                                  * assigned by the guest. */
   34.62 -    int               dev_present; /* Is there a device present? */
   34.63 -    struct usb_device * dev;
   34.64 -    unsigned long ifaces;  /* What interfaces are present on this device? */
   34.65 -} owned_port_t;
   34.66 -
   34.67 -
   34.68 -/*
   34.69 - * Each outstanding request that we've passed to the lower device layers has a
    34.70 - * 'pending_req' allocated to it.  When the request completes, the specified
    34.71 - * domain has a response queued for it, with the saved 'id' passed back.
   34.72 - */
   34.73 -typedef struct {
   34.74 -    usbif_priv_t       *usbif_priv;
   34.75 -    unsigned long      id;
   34.76 -    int                nr_pages;
   34.77 -    unsigned short     operation;
   34.78 -    int                status;
   34.79 -} pending_req_t;
   34.80 -
   34.81 -/*
   34.82 - * We can't allocate pending_req's in order, since they may complete out of 
   34.83 - * order. We therefore maintain an allocation ring. This ring also indicates 
   34.84 - * when enough work has been passed down -- at that point the allocation ring 
   34.85 - * will be empty.
   34.86 - */
   34.87 -static pending_req_t pending_reqs[MAX_PENDING_REQS];
   34.88 -static unsigned char pending_ring[MAX_PENDING_REQS];
   34.89 -static spinlock_t pend_prod_lock;
   34.90 -
   34.91 -/* NB. We use a different index type to differentiate from shared usb rings. */
   34.92 -typedef unsigned int PEND_RING_IDX;
   34.93 -#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
   34.94 -static PEND_RING_IDX pending_prod, pending_cons;
   34.95 -#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
   34.96 -
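
The pending_ring/pending_prod/pending_cons trio above is a fixed-size free list of request slots: slot indices are taken at the consumer end, returned at the producer end, and the in-flight count falls out of the two cursors. A self-contained, single-threaded model (so the pend_prod_lock is omitted):

    #include <stdio.h>

    #define MAX_PENDING_REQS 4                    /* must be a power of two */
    #define MASK_PEND_IDX(i) ((i) & (MAX_PENDING_REQS - 1))

    static unsigned char pending_ring[MAX_PENDING_REQS];
    static unsigned int pending_prod, pending_cons;
    #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)

    static int alloc_pending(void)
    {
        if (NR_PENDING_REQS == MAX_PENDING_REQS)
            return -1;                            /* every slot in flight */
        return pending_ring[MASK_PEND_IDX(pending_cons++)];
    }

    static void free_pending(int idx)
    {
        pending_ring[MASK_PEND_IDX(pending_prod++)] = idx;
    }

    int main(void)
    {
        int i, a, b;

        /* Same initialisation as usbif_init(): every slot starts free. */
        pending_prod = MAX_PENDING_REQS;
        pending_cons = 0;
        for (i = 0; i < MAX_PENDING_REQS; i++)
            pending_ring[i] = i;

        a = alloc_pending();
        b = alloc_pending();
        free_pending(b);                /* completions may be out of order */
        free_pending(a);
        printf("in flight: %u\n", NR_PENDING_REQS);   /* prints 0 */
        return 0;
    }
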
   34.97 -static int do_usb_io_op(usbif_priv_t *usbif, int max_to_do);
   34.98 -static void make_response(usbif_priv_t *usbif, unsigned long id, 
   34.99 -                          unsigned short op, int st, int inband,
  34.100 -			  unsigned long actual_length);
  34.101 -static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long port);
  34.102 -static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req);    
  34.103 -static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid);
  34.104 -static owned_port_t *usbif_find_port(char *);
  34.105 -
  34.106 -/******************************************************************
  34.107 - * PRIVATE DEBUG FUNCTIONS
  34.108 - */
  34.109 -
  34.110 -#undef DEBUG
  34.111 -#ifdef DEBUG
  34.112 -
  34.113 -static void dump_port(owned_port_t *p)
  34.114 -{
  34.115 -    printk(KERN_DEBUG "owned_port_t @ %p\n"
  34.116 -	   "  usbif_priv @ %p\n"
  34.117 -	   "  path: %s\n"
  34.118 -	   "  guest_port: %d\n"
  34.119 -	   "  guest_address: %ld\n"
  34.120 -	   "  dev_present: %d\n"
  34.121 -	   "  dev @ %p\n"
  34.122 -	   "  ifaces: 0x%lx\n",
  34.123 -	   p, p->usbif_priv, p->path, p->guest_port, p->guest_address,
  34.124 -	   p->dev_present, p->dev, p->ifaces);
  34.125 -}
  34.126 -
  34.127 -
  34.128 -static void dump_request(usbif_request_t *req)
  34.129 -{    
  34.130 -    printk(KERN_DEBUG "id = 0x%lx\n"
  34.131 -	   "devnum %d\n"
  34.132 -	   "endpoint 0x%x\n"
  34.133 -	   "direction %d\n"
  34.134 -	   "speed %d\n"
  34.135 -	   "pipe_type 0x%x\n"
  34.136 -	   "transfer_buffer 0x%lx\n"
  34.137 -	   "length 0x%lx\n"
  34.138 -	   "transfer_flags 0x%lx\n"
  34.139 -	   "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n"
  34.140 -	   "iso_schedule = 0x%lx\n"
  34.141 -	   "num_iso %ld\n",
  34.142 -	   req->id, req->devnum, req->endpoint, req->direction, req->speed,
  34.143 -	   req->pipe_type, req->transfer_buffer, req->length,
  34.144 -	   req->transfer_flags, req->setup[0], req->setup[1], req->setup[2],
  34.145 -	   req->setup[3], req->setup[4], req->setup[5], req->setup[6],
  34.146 -	   req->setup[7], req->iso_schedule, req->num_iso);
  34.147 -}
  34.148 -
  34.149 -static void dump_urb(struct urb *urb)
  34.150 -{
  34.151 -    printk(KERN_DEBUG "dumping urb @ %p\n", urb);
  34.152 -
  34.153 -#define DUMP_URB_FIELD(name, format) \
  34.154 -    printk(KERN_DEBUG "  " # name " " format "\n", urb-> name)
  34.155 -    
  34.156 -    DUMP_URB_FIELD(pipe, "0x%x");
  34.157 -    DUMP_URB_FIELD(status, "%d");
  34.158 -    DUMP_URB_FIELD(transfer_flags, "0x%x");    
  34.159 -    DUMP_URB_FIELD(transfer_buffer, "%p");
  34.160 -    DUMP_URB_FIELD(transfer_buffer_length, "%d");
  34.161 -    DUMP_URB_FIELD(actual_length, "%d");
  34.162 -}
  34.163 -
  34.164 -static void dump_response(usbif_response_t *resp)
  34.165 -{
  34.166 -    printk(KERN_DEBUG "usbback: Sending response:\n"
  34.167 -	   "         id = 0x%x\n"
  34.168 -	   "         op = %d\n"
  34.169 -	   "         status = %d\n"
  34.170 -	   "         data = %d\n"
  34.171 -	   "         length = %d\n",
  34.172 -	   resp->id, resp->op, resp->status, resp->data, resp->length);
  34.173 -}
  34.174 -
  34.175 -#else /* DEBUG */
  34.176 -
  34.177 -#define dump_port(blah)     ((void)0)
  34.178 -#define dump_request(blah)   ((void)0)
  34.179 -#define dump_urb(blah)      ((void)0)
  34.180 -#define dump_response(blah) ((void)0)
  34.181 -
  34.182 -#endif /* DEBUG */
  34.183 -
  34.184 -/******************************************************************
  34.185 - * MEMORY MANAGEMENT
  34.186 - */
  34.187 -
  34.188 -static void fast_flush_area(int idx, int nr_pages)
  34.189 -{
  34.190 -    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
  34.191 -    int               i;
  34.192 -
  34.193 -    for ( i = 0; i < nr_pages; i++ )
  34.194 -    {
  34.195 -	MULTI_update_va_mapping(mcl+i, MMAP_VADDR(idx, i),
  34.196 -				__pte(0), 0);
  34.197 -    }
  34.198 -
  34.199 -    mcl[nr_pages-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
  34.200 -    if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
  34.201 -        BUG();
  34.202 -}
  34.203 -
  34.204 -
  34.205 -/******************************************************************
  34.206 - * USB INTERFACE SCHEDULER LIST MAINTENANCE
  34.207 - */
  34.208 -
  34.209 -static struct list_head usbio_schedule_list;
  34.210 -static spinlock_t usbio_schedule_list_lock;
  34.211 -
  34.212 -static int __on_usbif_list(usbif_priv_t *up)
  34.213 -{
  34.214 -    return up->usbif_list.next != NULL;
  34.215 -}
  34.216 -
  34.217 -void remove_from_usbif_list(usbif_priv_t *up)
  34.218 -{
  34.219 -    unsigned long flags;
  34.220 -    if ( !__on_usbif_list(up) ) return;
  34.221 -    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
  34.222 -    if ( __on_usbif_list(up) )
  34.223 -    {
  34.224 -        list_del(&up->usbif_list);
  34.225 -        up->usbif_list.next = NULL;
  34.226 -        usbif_put(up);
  34.227 -    }
  34.228 -    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
  34.229 -}
  34.230 -
  34.231 -static void add_to_usbif_list_tail(usbif_priv_t *up)
  34.232 -{
  34.233 -    unsigned long flags;
  34.234 -    if ( __on_usbif_list(up) ) return;
  34.235 -    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
  34.236 -    if ( !__on_usbif_list(up) && (up->status == CONNECTED) )
  34.237 -    {
  34.238 -        list_add_tail(&up->usbif_list, &usbio_schedule_list);
  34.239 -        usbif_get(up);
  34.240 -    }
  34.241 -    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
  34.242 -}
  34.243 -
  34.244 -void free_pending(int pending_idx)
  34.245 -{
  34.246 -    unsigned long flags;
  34.247 -
  34.248 -    /* Free the pending request. */
  34.249 -    spin_lock_irqsave(&pend_prod_lock, flags);
  34.250 -    pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
  34.251 -    spin_unlock_irqrestore(&pend_prod_lock, flags);
  34.252 -}
  34.253 -
  34.254 -/******************************************************************
  34.255 - * COMPLETION CALLBACK -- Called as urb->complete()
  34.256 - */
  34.257 -
  34.258 -static void maybe_trigger_usbio_schedule(void);
  34.259 -
  34.260 -static void __end_usb_io_op(struct urb *purb)
  34.261 -{
  34.262 -    pending_req_t *pending_req;
  34.263 -    int pending_idx;
  34.264 -
  34.265 -    pending_req = purb->context;
  34.266 -
  34.267 -    pending_idx = pending_req - pending_reqs;
  34.268 -
  34.269 -    ASSERT(purb->actual_length <= purb->transfer_buffer_length);
  34.270 -    ASSERT(purb->actual_length <= pending_req->nr_pages * PAGE_SIZE);
  34.271 -    
  34.272 -    /* An error fails the entire request. */
  34.273 -    if ( purb->status )
  34.274 -    {
  34.275 -        printk(KERN_WARNING "URB @ %p failed. Status %d\n", purb, purb->status);
  34.276 -    }
  34.277 -
  34.278 -    if ( usb_pipetype(purb->pipe) == 0 )
  34.279 -    {
  34.280 -        int i;
  34.281 -        usbif_iso_t *sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, pending_req->nr_pages - 1);
  34.282 -
  34.283 -        /* If we're dealing with an iso pipe, we need to copy back the schedule. */
  34.284 -        for ( i = 0; i < purb->number_of_packets; i++ )
  34.285 -        {
  34.286 -            sched[i].length = purb->iso_frame_desc[i].actual_length;
  34.287 -            ASSERT(sched[i].buffer_offset ==
  34.288 -                   purb->iso_frame_desc[i].offset);
  34.289 -            sched[i].status = purb->iso_frame_desc[i].status;
  34.290 -        }
  34.291 -    }
  34.292 -    
  34.293 -    fast_flush_area(pending_req - pending_reqs, pending_req->nr_pages);
  34.294 -
  34.295 -    kfree(purb->setup_packet);
  34.296 -
  34.297 -    make_response(pending_req->usbif_priv, pending_req->id,
  34.298 -		  pending_req->operation, pending_req->status, 0, purb->actual_length);
  34.299 -    usbif_put(pending_req->usbif_priv);
  34.300 -
  34.301 -    usb_free_urb(purb);
  34.302 -
  34.303 -    free_pending(pending_idx);
  34.304 -    
  34.305 -    rmb();
  34.306 -
  34.307 -    /* Check for anything still waiting in the rings, having freed a request... */
  34.308 -    maybe_trigger_usbio_schedule();
  34.309 -}
  34.310 -
  34.311 -/******************************************************************
  34.312 - * SCHEDULER FUNCTIONS
  34.313 - */
  34.314 -
  34.315 -static DECLARE_WAIT_QUEUE_HEAD(usbio_schedule_wait);
  34.316 -
  34.317 -static int usbio_schedule(void *arg)
  34.318 -{
  34.319 -    DECLARE_WAITQUEUE(wq, current);
  34.320 -
  34.321 -    usbif_priv_t          *up;
  34.322 -    struct list_head *ent;
  34.323 -
   34.324 -    daemonize("xenusbd"); /* 2.6 daemonize() takes a name; "xenusbd" is illustrative */
  34.325 -
  34.326 -    for ( ; ; )
  34.327 -    {
  34.328 -        /* Wait for work to do. */
  34.329 -        add_wait_queue(&usbio_schedule_wait, &wq);
  34.330 -        set_current_state(TASK_INTERRUPTIBLE);
  34.331 -        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 
  34.332 -             list_empty(&usbio_schedule_list) )
  34.333 -            schedule();
  34.334 -        __set_current_state(TASK_RUNNING);
  34.335 -        remove_wait_queue(&usbio_schedule_wait, &wq);
  34.336 -
  34.337 -        /* Queue up a batch of requests. */
  34.338 -        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
  34.339 -                !list_empty(&usbio_schedule_list) )
  34.340 -        {
  34.341 -            ent = usbio_schedule_list.next;
  34.342 -            up = list_entry(ent, usbif_priv_t, usbif_list);
  34.343 -            usbif_get(up);
  34.344 -            remove_from_usbif_list(up);
  34.345 -            if ( do_usb_io_op(up, BATCH_PER_DOMAIN) )
  34.346 -                add_to_usbif_list_tail(up);
  34.347 -            usbif_put(up);
  34.348 -        }
  34.349 -    }
  34.350 -}
  34.351 -
  34.352 -static void maybe_trigger_usbio_schedule(void)
  34.353 -{
  34.354 -    /*
   34.355 -     * Needed so that two processes, which together make the following predicate
  34.356 -     * true, don't both read stale values and evaluate the predicate
  34.357 -     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
  34.358 -     */
  34.359 -    smp_mb();
  34.360 -
  34.361 -    if ( !list_empty(&usbio_schedule_list) )
  34.362 -        wake_up(&usbio_schedule_wait);
  34.363 -}
  34.364 -
  34.365 -
  34.366 -/******************************************************************************
  34.367 - * NOTIFICATION FROM GUEST OS.
  34.368 - */
  34.369 -
  34.370 -irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs)
  34.371 -{
  34.372 -    usbif_priv_t *up = dev_id;
  34.373 -
  34.374 -    smp_mb();
  34.375 -
  34.376 -    add_to_usbif_list_tail(up); 
  34.377 -
  34.378 -    /* Will in fact /always/ trigger an io schedule in this case. */
  34.379 -    maybe_trigger_usbio_schedule();
  34.380 -
  34.381 -    return IRQ_HANDLED;
  34.382 -}
  34.383 -
  34.384 -
  34.385 -
  34.386 -/******************************************************************
  34.387 - * DOWNWARD CALLS -- These interface with the usb-device layer proper.
  34.388 - */
  34.389 -
  34.390 -static int do_usb_io_op(usbif_priv_t *up, int max_to_do)
  34.391 -{
  34.392 -    usbif_back_ring_t *usb_ring = &up->usb_ring;
  34.393 -    usbif_request_t *req;
  34.394 -    RING_IDX i, rp;
  34.395 -    int more_to_do = 0;
  34.396 -
  34.397 -    rp = usb_ring->sring->req_prod;
  34.398 -    rmb(); /* Ensure we see queued requests up to 'rp'. */
  34.399 -    
  34.400 -    /* Take items off the comms ring, taking care not to overflow. */
  34.401 -    for ( i = usb_ring->req_cons; 
  34.402 -          (i != rp) && !RING_REQUEST_CONS_OVERFLOW(usb_ring, i);
  34.403 -          i++ )
  34.404 -    {
  34.405 -        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
  34.406 -        {
  34.407 -            more_to_do = 1;
  34.408 -            break;
  34.409 -        }
  34.410 -
  34.411 -        req = RING_GET_REQUEST(usb_ring, i);
  34.412 -        
  34.413 -        switch ( req->operation )
  34.414 -        {
  34.415 -        case USBIF_OP_PROBE:
  34.416 -            dispatch_usb_probe(up, req->id, req->port);
  34.417 -            break;
  34.418 -
   34.419 -        case USBIF_OP_IO:
   34.420 -            /* Assemble an appropriate URB. */
   34.421 -            dispatch_usb_io(up, req);
   34.422 -            break;
   34.423 -
   34.424 -        case USBIF_OP_RESET:
   34.425 -            dispatch_usb_reset(up, req->port);
   34.426 -            break;
  34.427 -
  34.428 -        default:
  34.429 -            DPRINTK("error: unknown USB io operation [%d]\n",
  34.430 -                    req->operation);
  34.431 -            make_response(up, req->id, req->operation, -EINVAL, 0, 0);
  34.432 -            break;
  34.433 -        }
  34.434 -    }
  34.435 -
  34.436 -    usb_ring->req_cons = i;
  34.437 -
  34.438 -    return more_to_do;
  34.439 -}
  34.440 -
  34.441 -static owned_port_t *find_guest_port(usbif_priv_t *up, int port)
  34.442 -{
  34.443 -    unsigned long flags;
  34.444 -    struct list_head *l;
  34.445 -
  34.446 -    spin_lock_irqsave(&owned_ports_lock, flags);
  34.447 -    list_for_each(l, &owned_ports)
  34.448 -    {
  34.449 -        owned_port_t *p = list_entry(l, owned_port_t, list);
  34.450 -        if(p->usbif_priv == up && p->guest_port == port)
  34.451 -        {
  34.452 -            spin_unlock_irqrestore(&owned_ports_lock, flags);
  34.453 -            return p;
  34.454 -        }
  34.455 -    }
  34.456 -    spin_unlock_irqrestore(&owned_ports_lock, flags);
  34.457 -
  34.458 -    return NULL;
  34.459 -}
  34.460 -
  34.461 -static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid)
  34.462 -{
  34.463 -    owned_port_t *port = find_guest_port(up, portid);
  34.464 -    int ret = 0;
   34.465 -    if ( port == NULL ) /* Unknown guest port: fail the reset request. */
   34.466 -        { make_response(up, 0, USBIF_OP_RESET, -EINVAL, 0, 0); return; }
  34.467 -    /* Allowing the guest to actually reset the device causes more problems
  34.468 -     * than it's worth.  We just fake it out in software but we will do a real
  34.469 -     * reset when the interface is destroyed. */
  34.470 -
  34.471 -    dump_port(port);
  34.472 -
  34.473 -    port->guest_address = 0;
  34.474 -    /* If there's an attached device then the port is now enabled. */
  34.475 -    if ( port->dev_present )
  34.476 -        port->enabled = 1;
  34.477 -    else
  34.478 -        port->enabled = 0;
  34.479 -
  34.480 -    make_response(up, 0, USBIF_OP_RESET, ret, 0, 0);
  34.481 -}
  34.482 -
  34.483 -static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long portid)
  34.484 -{
  34.485 -    owned_port_t *port = find_guest_port(up, portid);
  34.486 -    int ret;
  34.487 - 
  34.488 -    if ( port != NULL )
  34.489 -        ret = port->dev_present;
  34.490 -    else
  34.491 -    {
  34.492 -        ret = -EINVAL;
  34.493 -        printk(KERN_INFO "dispatch_usb_probe(): invalid port probe request "
   34.494 -	       "(port %lu)\n", portid);
  34.495 -    }
  34.496 -
  34.497 -    /* Probe result is sent back in-band.  Probes don't have an associated id
  34.498 -     * right now... */
  34.499 -    make_response(up, id, USBIF_OP_PROBE, ret, portid, 0);
  34.500 -}
  34.501 -
  34.502 -/**
  34.503 - * check_iso_schedule - safety check the isochronous schedule for an URB
  34.504 - * @purb : the URB in question
  34.505 - */
  34.506 -static int check_iso_schedule(struct urb *purb)
  34.507 -{
  34.508 -    int i;
  34.509 -    unsigned long total_length = 0;
  34.510 -    
  34.511 -    for ( i = 0; i < purb->number_of_packets; i++ )
  34.512 -    {
  34.513 -        struct usb_iso_packet_descriptor *desc = &purb->iso_frame_desc[i];
  34.514 -        
  34.515 -        if ( desc->offset >= purb->transfer_buffer_length
  34.516 -            || ( desc->offset + desc->length) > purb->transfer_buffer_length )
  34.517 -            return -EINVAL;
  34.518 -
  34.519 -        total_length += desc->length;
  34.520 -
  34.521 -        if ( total_length > purb->transfer_buffer_length )
  34.522 -            return -EINVAL;
  34.523 -    }
  34.524 -    
  34.525 -    return 0;
  34.526 -}
  34.527 -
  34.528 -owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req);
  34.529 -
  34.530 -static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req)
  34.531 -{
  34.532 -    unsigned long buffer_mach;
  34.533 -    int i = 0, offset = 0,
  34.534 -        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
  34.535 -    pending_req_t *pending_req;
  34.536 -    unsigned long  remap_prot;
  34.537 -    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
  34.538 -    struct urb *purb = NULL;
  34.539 -    owned_port_t *port;
  34.540 -    unsigned char *setup;    
  34.541 -
  34.542 -    dump_request(req);
  34.543 -
  34.544 -    if ( NR_PENDING_REQS == MAX_PENDING_REQS )
  34.545 -    {
  34.546 -        printk(KERN_WARNING "usbback: Max requests already queued. "
  34.547 -	       "Giving up!\n");
  34.548 -        
  34.549 -        return;
  34.550 -    }
  34.551 -
  34.552 -    port = find_port_for_request(up, req);
  34.553 -
  34.554 -    if ( port == NULL )
  34.555 -    {
  34.556 -	printk(KERN_WARNING "No such device! (%d)\n", req->devnum);
  34.557 -	dump_request(req);
  34.558 -
  34.559 -        make_response(up, req->id, req->operation, -ENODEV, 0, 0);
  34.560 -	return;
  34.561 -    }
  34.562 -    else if ( !port->dev_present )
  34.563 -    {
  34.564 -        /* In normal operation, we'll only get here if a device is unplugged
  34.565 -         * and the frontend hasn't noticed yet. */
  34.566 -        make_response(up, req->id, req->operation, -ENODEV, 0, 0);
  34.567 -	return;
  34.568 -    }
  34.569 -        
  34.570 -
  34.571 -    setup = kmalloc(8, GFP_KERNEL);
  34.572 -
  34.573 -    if ( setup == NULL )
  34.574 -        goto no_mem;
  34.575 -   
  34.576 -    /* Copy request out for safety. */
  34.577 -    memcpy(setup, req->setup, 8);
  34.578 -
  34.579 -    if( setup[0] == 0x0 && setup[1] == 0x5)
  34.580 -    {
  34.581 -        /* To virtualise the USB address space, we need to intercept
  34.582 -         * set_address messages and emulate.  From the USB specification:
  34.583 -         * bmRequestType = 0x0;
   34.584 -         * bRequest = SET_ADDRESS (i.e. 0x5)
  34.585 -         * wValue = device address
  34.586 -         * wIndex = 0
  34.587 -         * wLength = 0
  34.588 -         * data = None
  34.589 -         */
   34.590 -        /* Extract the requested device address (little-endian wValue). */
  34.591 -        port->guest_address = le16_to_cpu(*(u16 *)(setup + 2));
  34.592 -        /* Make a successful response.  That was easy! */
  34.593 -
  34.594 -        make_response(up, req->id, req->operation, 0, 0, 0);
  34.595 -
  34.596 -	kfree(setup);
  34.597 -        return;
  34.598 -    }
  34.599 -    else if ( setup[0] == 0x0 && setup[1] == 0x9 )
  34.600 -    {
  34.601 -        /* The host kernel needs to know what device configuration is in use
  34.602 -         * because various error checks get confused otherwise.  We just do
  34.603 -         * configuration settings here, under controlled conditions.
  34.604 -         */
  34.605 -
  34.606 -      /* Ignore configuration setting and hope that the host kernel
  34.607 -	 did it right. */
  34.608 -        /* usb_set_configuration(port->dev, setup[2]); */
  34.609 -
  34.610 -        make_response(up, req->id, req->operation, 0, 0, 0);
  34.611 -
  34.612 -        kfree(setup);
  34.613 -        return;
  34.614 -    }
  34.615 -    else if ( setup[0] == 0x1 && setup[1] == 0xB )
  34.616 -    {
  34.617 -        /* The host kernel needs to know what device interface is in use
  34.618 -         * because various error checks get confused otherwise.  We just do
  34.619 -         * configuration settings here, under controlled conditions.
  34.620 -         */
  34.621 -        usb_set_interface(port->dev, (setup[4] | setup[5] << 8),
  34.622 -                          (setup[2] | setup[3] << 8) );
  34.623 -
  34.624 -        make_response(up, req->id, req->operation, 0, 0, 0);
  34.625 -
  34.626 -        kfree(setup);
  34.627 -        return;
  34.628 -    }
  34.629 -
  34.630 -    if ( ( req->transfer_buffer - (req->transfer_buffer & PAGE_MASK)
  34.631 -	   + req->length )
  34.632 -	 > MMAP_PAGES_PER_REQUEST * PAGE_SIZE )
  34.633 -    {
  34.634 -        printk(KERN_WARNING "usbback: request of %lu bytes too large\n",
  34.635 -	       req->length);
  34.636 -        make_response(up, req->id, req->operation, -EINVAL, 0, 0);
  34.637 -        kfree(setup);
  34.638 -        return;
  34.639 -    }
  34.640 -    
  34.641 -    buffer_mach = req->transfer_buffer;
  34.642 -
  34.643 -    if( buffer_mach == 0 )
  34.644 -	goto no_remap;
  34.645 -
  34.646 -    ASSERT((req->length >> PAGE_SHIFT) <= MMAP_PAGES_PER_REQUEST);
  34.647 -    ASSERT(buffer_mach);
  34.648 -
  34.649 -    /* Always map writeable for now. */
  34.650 -    remap_prot = _KERNPG_TABLE;
  34.651 -
  34.652 -    for ( i = 0, offset = 0; offset < req->length;
  34.653 -          i++, offset += PAGE_SIZE )
  34.654 -    {
  34.655 -	MULTI_update_va_mapping_otherdomain(
  34.656 -	    mcl+i, MMAP_VADDR(pending_idx, i),
  34.657 -	    pfn_pte_ma((buffer_mach + offset) >> PAGE_SHIFT, remap_prot),
  34.658 -	    0, up->domid);
  34.659 -        
  34.660 -        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
  34.661 -            FOREIGN_FRAME((buffer_mach + offset) >> PAGE_SHIFT);
  34.662 -
  34.663 -        ASSERT(virt_to_mfn(MMAP_VADDR(pending_idx, i))
  34.664 -               == ((buffer_mach >> PAGE_SHIFT) + i));
  34.665 -    }
  34.666 -
  34.667 -    if ( req->pipe_type == 0 && req->num_iso > 0 ) /* Maybe schedule ISO... */
  34.668 -    {
  34.669 -        /* Map in ISO schedule, if necessary. */
  34.670 -	MULTI_update_va_mapping_otherdomain(
  34.671 -	    mcl+i, MMAP_VADDR(pending_idx, i),
  34.672 -	    pfn_pte_ma(req->iso_schedule >> PAGE_SHIFT, remap_prot),
  34.673 -	    0, up->domid);
  34.674 -
  34.675 -        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
  34.676 -            FOREIGN_FRAME(req->iso_schedule >> PAGE_SHIFT);
  34.677 -    
  34.678 -        i++;
  34.679 -    }
  34.680 -
  34.681 -    if ( unlikely(HYPERVISOR_multicall(mcl, i) != 0) )
  34.682 -        BUG();
  34.683 -    
  34.684 -    {
  34.685 -        int j;
  34.686 -        for ( j = 0; j < i; j++ )
  34.687 -        {
  34.688 -            if ( unlikely(mcl[j].result != 0) )
  34.689 -            {
  34.690 -                printk(KERN_WARNING
  34.691 -		       "invalid buffer %d -- could not remap it\n", j);
  34.692 -                fast_flush_area(pending_idx, i);
  34.693 -                goto bad_descriptor;
  34.694 -            }
  34.695 -	}
  34.696 -    }
  34.697 -    
  34.698 - no_remap:
  34.699 -
  34.700 -    ASSERT(i <= MMAP_PAGES_PER_REQUEST);
  34.701 -    ASSERT(i * PAGE_SIZE >= req->length);
  34.702 -
  34.703 -    /* We have to do this because some things might complete out of order. */
  34.704 -    pending_req = &pending_reqs[pending_idx];
  34.705 -    pending_req->usbif_priv= up;
  34.706 -    pending_req->id        = req->id;
  34.707 -    pending_req->operation = req->operation;
  34.708 -    pending_req->nr_pages  = i;
  34.709 -
  34.710 -    pending_cons++;
  34.711 -
  34.712 -    usbif_get(up);
  34.713 -    
  34.714 -    /* Fill out an actual request for the USB layer. */
  34.715 -    purb = usb_alloc_urb(req->num_iso);
  34.716 -
  34.717 -    if ( purb == NULL )
  34.718 -    {
  34.719 -        usbif_put(up);
  34.720 -        free_pending(pending_idx);
  34.721 -        goto no_mem;
  34.722 -    }
  34.723 -
  34.724 -    purb->dev = port->dev;
  34.725 -    purb->context = pending_req;
  34.726 -    purb->transfer_buffer =
  34.727 -        (void *)(MMAP_VADDR(pending_idx, 0) + (buffer_mach & ~PAGE_MASK));
  34.728 -    if(buffer_mach == 0)
  34.729 -      purb->transfer_buffer = NULL;
  34.730 -    purb->complete = __end_usb_io_op;
  34.731 -    purb->transfer_buffer_length = req->length;
  34.732 -    purb->transfer_flags = req->transfer_flags;
  34.733 -
  34.734 -    purb->pipe = 0;
  34.735 -    purb->pipe |= req->direction << 7;
  34.736 -    purb->pipe |= port->dev->devnum << 8;
  34.737 -    purb->pipe |= req->speed << 26;
  34.738 -    purb->pipe |= req->pipe_type << 30;
  34.739 -    purb->pipe |= req->endpoint << 15;
  34.740 -
  34.741 -    purb->number_of_packets = req->num_iso;
  34.742 -
  34.743 -    if ( purb->number_of_packets * sizeof(usbif_iso_t) > PAGE_SIZE )
  34.744 -        goto urb_error;
  34.745 -
  34.746 -    /* Make sure there's always some kind of timeout. */
  34.747 -    purb->timeout = ( req->timeout > 0 ) ? (req->timeout * HZ) / 1000
  34.748 -                    :  1000;
  34.749 -
  34.750 -    purb->setup_packet = setup;
  34.751 -
  34.752 -    if ( req->pipe_type == 0 ) /* ISO */
  34.753 -    {
  34.754 -        int j;
  34.755 -        usbif_iso_t *iso_sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, i - 1);
  34.756 -
  34.757 -        /* If we're dealing with an iso pipe, we need to copy in a schedule. */
  34.758 -        for ( j = 0; j < purb->number_of_packets; j++ )
  34.759 -        {
  34.760 -            purb->iso_frame_desc[j].length = iso_sched[j].length;
  34.761 -            purb->iso_frame_desc[j].offset = iso_sched[j].buffer_offset;
  34.762 -            iso_sched[j].status = 0;
  34.763 -        }
  34.764 -    }
  34.765 -
  34.766 -    if ( check_iso_schedule(purb) != 0 )
  34.767 -        goto urb_error;
  34.768 -
  34.769 -    if ( usb_submit_urb(purb) != 0 )
  34.770 -        goto urb_error;
  34.771 -
  34.772 -    return;
  34.773 -
  34.774 - urb_error:
  34.775 -    dump_urb(purb);    
  34.776 -    usbif_put(up);
  34.777 -    free_pending(pending_idx);
  34.778 -
  34.779 - bad_descriptor:
   34.780 -    kfree(setup);
  34.781 -    if ( purb != NULL )
  34.782 -        usb_free_urb(purb);
  34.783 -    make_response(up, req->id, req->operation, -EINVAL, 0, 0);
  34.784 -    return;
  34.785 -    
  34.786 - no_mem:
  34.787 -    if ( setup != NULL )
  34.788 -        kfree(setup);
  34.789 -    make_response(up, req->id, req->operation, -ENOMEM, 0, 0);
  34.790 -    return;
  34.791 -} 
  34.792 -
  34.793 -
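
dispatch_usb_io() above packs the Linux 2.6 'pipe' word by hand instead of using the usb_{snd,rcv}*pipe() helpers: direction at bit 7, device number at bits 8-14, endpoint at bits 15-18, low-speed flag at bit 26 and pipe type at bits 30-31. A standalone sketch that reproduces the same packing (the shifts mirror the code above; treat the layout as this driver's assumption rather than a public API):

    #include <stdio.h>

    /* Mirrors the pipe assembly in dispatch_usb_io(). */
    static unsigned int make_pipe(unsigned dir, unsigned devnum,
                                  unsigned endpoint, unsigned speed,
                                  unsigned pipe_type)
    {
        unsigned int pipe = 0;

        pipe |= dir       << 7;   /* 1 = IN, 0 = OUT */
        pipe |= devnum    << 8;   /* USB device number on the bus */
        pipe |= endpoint  << 15;
        pipe |= speed     << 26;  /* 1 = low speed */
        pipe |= pipe_type << 30;  /* 0 iso, 1 int, 2 ctrl, 3 bulk */
        return pipe;
    }

    int main(void)
    {
        /* Bulk IN transfer to endpoint 1 of device 3, full speed. */
        printf("pipe = %#x\n", make_pipe(1, 3, 1, 0, 3));
        return 0;
    }
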
  34.794 -
  34.795 -/******************************************************************
  34.796 - * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
  34.797 - */
  34.798 -
  34.799 -
  34.800 -static void make_response(usbif_priv_t *up, unsigned long id,
  34.801 -                          unsigned short op, int st, int inband,
  34.802 -			  unsigned long length)
  34.803 -{
  34.804 -    usbif_response_t *resp;
  34.805 -    unsigned long     flags;
  34.806 -    usbif_back_ring_t *usb_ring = &up->usb_ring;
  34.807 -
  34.808 -    /* Place on the response ring for the relevant domain. */ 
  34.809 -    spin_lock_irqsave(&up->usb_ring_lock, flags);
  34.810 -    resp = RING_GET_RESPONSE(usb_ring, usb_ring->rsp_prod_pvt);
  34.811 -    resp->id        = id;
  34.812 -    resp->operation = op;
  34.813 -    resp->status    = st;
  34.814 -    resp->data      = inband;
  34.815 -    resp->length = length;
  34.816 -    wmb(); /* Ensure other side can see the response fields. */
  34.817 -
  34.818 -    dump_response(resp);
  34.819 -
  34.820 -    usb_ring->rsp_prod_pvt++;
  34.821 -    RING_PUSH_RESPONSES(usb_ring);
  34.822 -    spin_unlock_irqrestore(&up->usb_ring_lock, flags);
  34.823 -
  34.824 -    /* Kick the relevant domain. */
  34.825 -    notify_via_evtchn(up->evtchn);
  34.826 -}
  34.827 -
  34.828 -/**
  34.829 - * usbif_claim_port - claim devices on a port on behalf of guest
  34.830 - *
  34.831 - * Once completed, this will ensure that any device attached to that
  34.832 - * port is claimed by this driver for use by the guest.
  34.833 - */
  34.834 -int usbif_claim_port(usbif_be_claim_port_t *msg)
  34.835 -{
  34.836 -    owned_port_t *o_p;
  34.837 -    
  34.838 -    /* Sanity... */
  34.839 -    if ( usbif_find_port(msg->path) != NULL )
  34.840 -    {
  34.841 -        printk(KERN_WARNING "usbback: Attempted to claim USB port "
  34.842 -               "we already own!\n");
  34.843 -        return -EINVAL;
  34.844 -    }
  34.845 -
  34.846 -    /* No need for a slab cache - this should be infrequent. */
  34.847 -    o_p = kmalloc(sizeof(owned_port_t), GFP_KERNEL);
  34.848 -
  34.849 -    if ( o_p == NULL )
  34.850 -        return -ENOMEM;
  34.851 -
  34.852 -    o_p->enabled = 0;
  34.853 -    o_p->usbif_priv = usbif_find(msg->domid);
  34.854 -    o_p->guest_port = msg->usbif_port;
  34.855 -    o_p->dev_present = 0;
  34.856 -    o_p->guest_address = 0; /* Default address. */
  34.857 -
  34.858 -    strcpy(o_p->path, msg->path);
  34.859 -
  34.860 -    spin_lock_irq(&owned_ports_lock);
  34.861 -    
  34.862 -    list_add(&o_p->list, &owned_ports);
  34.863 -
  34.864 -    spin_unlock_irq(&owned_ports_lock);
  34.865 -
  34.866 -    printk(KERN_INFO "usbback: Claimed USB port (%s) for %d.%d\n", o_p->path,
  34.867 -	   msg->domid, msg->usbif_port);
  34.868 -
  34.869 -    /* Force a reprobe for unclaimed devices. */
  34.870 -    usb_scan_devices();
  34.871 -
  34.872 -    return 0;
  34.873 -}
  34.874 -
  34.875 -owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req)
  34.876 -{
  34.877 -    unsigned long flags;
  34.878 -    struct list_head *port;
  34.879 -
  34.880 -    /* I'm assuming this is not called from IRQ context - correct?  I think
  34.881 -     * it's probably only called in response to control messages or plug events
  34.882 -     * in the USB hub kernel thread, so should be OK. */
  34.883 -    spin_lock_irqsave(&owned_ports_lock, flags);
  34.884 -    list_for_each(port, &owned_ports)
  34.885 -    {
  34.886 -        owned_port_t *p = list_entry(port, owned_port_t, list);
   34.887 -        if ( p->usbif_priv == up && p->guest_address == req->devnum && p->enabled )
   34.888 -        {
   34.889 -            dump_port(p);
   34.890 -
   34.891 -            spin_unlock_irqrestore(&owned_ports_lock, flags);
   34.892 -            return p;
   34.893 -        }
  34.894 -    }
  34.895 -    spin_unlock_irqrestore(&owned_ports_lock, flags);
  34.896 -
  34.897 -    return NULL;    
  34.898 -}
  34.899 -
  34.900 -owned_port_t *__usbif_find_port(char *path)
  34.901 -{
  34.902 -    struct list_head *port;
  34.903 -
  34.904 -    list_for_each(port, &owned_ports)
  34.905 -    {
  34.906 -        owned_port_t *p = list_entry(port, owned_port_t, list);
  34.907 -        if(!strcmp(path, p->path))
  34.908 -        {
  34.909 -            return p;
  34.910 -        }
  34.911 -    }
  34.912 -
  34.913 -    return NULL;
  34.914 -}
  34.915 -
  34.916 -owned_port_t *usbif_find_port(char *path)
  34.917 -{
  34.918 -    owned_port_t *ret;
  34.919 -    unsigned long flags;
  34.920 -
  34.921 -    spin_lock_irqsave(&owned_ports_lock, flags);
  34.922 -    ret = __usbif_find_port(path);    
  34.923 -    spin_unlock_irqrestore(&owned_ports_lock, flags);
  34.924 -
  34.925 -    return ret;
  34.926 -}
  34.927 -
  34.928 -
  34.929 -static void *probe(struct usb_device *dev, unsigned iface,
  34.930 -                   const struct usb_device_id *id)
  34.931 -{
  34.932 -    owned_port_t *p;
  34.933 -
  34.934 -    /* We don't care what the device is - if we own the port, we want it.  We
  34.935 -     * don't deal with device-specifics in this driver, so we don't care what
  34.936 -     * the device actually is ;-) */
  34.937 -    if ( ( p = usbif_find_port(dev->devpath) ) != NULL )
  34.938 -    {
  34.939 -        printk(KERN_INFO "usbback: claimed device attached to owned port\n");
  34.940 -
  34.941 -        p->dev_present = 1;
  34.942 -        p->dev = dev;
  34.943 -        set_bit(iface, &p->ifaces);
  34.944 -        
  34.945 -        return p->usbif_priv;
  34.946 -    }
  34.947 -    else
  34.948 -        printk(KERN_INFO "usbback: hotplug for non-owned port (%s), ignoring\n",
  34.949 -	       dev->devpath);
  34.950 -   
  34.951 -
  34.952 -    return NULL;
  34.953 -}
  34.954 -
  34.955 -static void disconnect(struct usb_device *dev, void *usbif)
  34.956 -{
  34.957 -    /* Note the device is removed so we can tell the guest when it probes. */
   34.958 -    owned_port_t *port = usbif_find_port(dev->devpath);
   34.959 -    if ( port == NULL ) /* Disconnect on a port we never claimed. */
   34.960 -        return;
   34.961 -    port->dev_present = 0; port->dev = NULL; port->ifaces = 0;
  34.962 -}
  34.963 -
  34.964 -
  34.965 -struct usb_driver driver =
  34.966 -{
  34.967 -    .owner      = THIS_MODULE,
  34.968 -    .name       = "Xen USB Backend",
  34.969 -    .probe      = probe,
  34.970 -    .disconnect = disconnect,
  34.971 -    .id_table   = NULL,
  34.972 -};
  34.973 -
  34.974 -/* __usbif_release_port - internal mechanics for releasing a port */
  34.975 -void __usbif_release_port(owned_port_t *p)
  34.976 -{
  34.977 -    int i;
  34.978 -
  34.979 -    for ( i = 0; p->ifaces != 0; i++)
  34.980 -        if ( p->ifaces & 1 << i )
  34.981 -        {
  34.982 -            usb_driver_release_interface(&driver, usb_ifnum_to_if(p->dev, i));
  34.983 -            clear_bit(i, &p->ifaces);
  34.984 -        }
  34.985 -    list_del(&p->list);
  34.986 -
  34.987 -    /* Reset the real device.  We don't simulate disconnect / probe for other
  34.988 -     * drivers in this kernel because we assume the device is completely under
  34.989 -     * the control of ourselves (i.e. the guest!).  This should ensure that the
  34.990 -     * device is in a sane state for the next customer ;-) */
  34.991 -
  34.992 -    /* MAW NB: we're not resetting the real device here.  This looks perfectly
  34.993 -     * valid to me but it causes memory corruption.  We seem to get away with not
  34.994 -     * resetting for now, although it'd be nice to have this tracked down. */
  34.995 -/*     if ( p->dev != NULL) */
  34.996 -/*         usb_reset_device(p->dev); */
  34.997 -
  34.998 -    kfree(p);
  34.999 -}
 34.1000 -
 34.1001 -
 34.1002 -/**
 34.1003 - * usbif_release_port - stop claiming devices on a port on behalf of guest
 34.1004 - */
 34.1005 -void usbif_release_port(usbif_be_release_port_t *msg)
 34.1006 -{
 34.1007 -    owned_port_t *p;
 34.1008 -
 34.1009 -    spin_lock_irq(&owned_ports_lock);
 34.1010 -    p = __usbif_find_port(msg->path);
  34.1011 -    if ( p != NULL ) __usbif_release_port(p); /* path may be unclaimed */
 34.1012 -    spin_unlock_irq(&owned_ports_lock);
 34.1013 -}
 34.1014 -
 34.1015 -void usbif_release_ports(usbif_priv_t *up)
 34.1016 -{
 34.1017 -    struct list_head *port, *tmp;
 34.1018 -    unsigned long flags;
 34.1019 -    
 34.1020 -    spin_lock_irqsave(&owned_ports_lock, flags);
 34.1021 -    list_for_each_safe(port, tmp, &owned_ports)
 34.1022 -    {
 34.1023 -        owned_port_t *p = list_entry(port, owned_port_t, list);
 34.1024 -        if ( p->usbif_priv == up )
 34.1025 -            __usbif_release_port(p);
 34.1026 -    }
 34.1027 -    spin_unlock_irqrestore(&owned_ports_lock, flags);
 34.1028 -}
 34.1029 -
 34.1030 -static int __init usbif_init(void)
 34.1031 -{
 34.1032 -    int i;
 34.1033 -    struct page *page;
 34.1034 -
 34.1035 -    if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
 34.1036 -         !(xen_start_info->flags & SIF_USB_BE_DOMAIN) )
 34.1037 -        return 0;
 34.1038 -
 34.1039 -    page = balloon_alloc_empty_page_range(MMAP_PAGES);
 34.1040 -    BUG_ON(page == NULL);
 34.1041 -    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 34.1042 -
 34.1043 -    pending_cons = 0;
 34.1044 -    pending_prod = MAX_PENDING_REQS;
 34.1045 -    memset(pending_reqs, 0, sizeof(pending_reqs));
 34.1046 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
 34.1047 -        pending_ring[i] = i;
 34.1048 -
 34.1049 -    spin_lock_init(&pend_prod_lock);
 34.1050 -
 34.1051 -    spin_lock_init(&owned_ports_lock);
 34.1052 -    INIT_LIST_HEAD(&owned_ports);
 34.1053 -
 34.1054 -    spin_lock_init(&usbio_schedule_list_lock);
 34.1055 -    INIT_LIST_HEAD(&usbio_schedule_list);
 34.1056 -
 34.1057 -    if ( kernel_thread(usbio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
 34.1058 -        BUG();
 34.1059 -    
 34.1060 -    usbif_interface_init();
 34.1061 -
 34.1062 -    usbif_ctrlif_init();
 34.1063 -
 34.1064 -    usb_register(&driver);
 34.1065 -
 34.1066 -    printk(KERN_INFO "Xen USB Backend Initialised\n");
 34.1067 -
 34.1068 -    return 0;
 34.1069 -}
 34.1070 -
 34.1071 -__initcall(usbif_init);
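
As an aside on the initialisation above: pending_ring[] is primed with the identity permutation and pending_prod starts at MAX_PENDING_REQS, so the array behaves as a ring of free request indices. A minimal sketch of that free-index ring, assuming a power-of-two MAX_PENDING_REQS (the value 64, MASK_PEND_IDX, and the alloc/free helpers here are illustrative, not taken from the source):

    /* Free-index ring: entries are indices of free pending_reqs[] slots.
     * (prod - cons) counts how many indices are currently free.
     * pending_ring[i] = i is set at init time, as in usbif_init() above. */
    #define MAX_PENDING_REQS 64                  /* assumed power of two */
    #define MASK_PEND_IDX(i) ((i) & (MAX_PENDING_REQS - 1))

    static u16 pending_ring[MAX_PENDING_REQS];
    static unsigned int pending_prod = MAX_PENDING_REQS, pending_cons;

    static int alloc_pending_idx(void)           /* illustrative helper */
    {
        if (pending_prod == pending_cons)
            return -1;                           /* no free slots */
        return pending_ring[MASK_PEND_IDX(pending_cons++)];
    }

    static void free_pending_idx(u16 idx)        /* illustrative helper */
    {
        pending_ring[MASK_PEND_IDX(pending_prod++)] = idx;
    }
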
    35.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbfront/usbfront.c	Thu Sep 22 16:05:44 2005 +0100
    35.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.3 @@ -1,1735 +0,0 @@
    35.4 -/*
    35.5 - * Xen Virtual USB Frontend Driver 
    35.6 - *
    35.7 - * This file contains the first version of the Xen virtual USB hub
    35.8 - * that I've managed not to delete by mistake (3rd time lucky!).
    35.9 - *
   35.10 - * Based on Linux's uhci.c, original copyright notices are displayed
   35.11 - * below.  Portions also (c) 2004 Intel Research Cambridge
   35.12 - * and (c) 2004, 2005 Mark Williamson
   35.13 - *
   35.14 - * Contact <mark.williamson@cl.cam.ac.uk> or
   35.15 - * <xen-devel@lists.sourceforge.net> regarding this code.
   35.16 - *
   35.17 - * Still to be (maybe) implemented:
   35.18 - * - migration / backend restart support?
   35.19 - * - support for building / using as a module
   35.20 - */
   35.21 -
   35.22 -/*
   35.23 - * Universal Host Controller Interface driver for USB.
   35.24 - *
   35.25 - * Maintainer: Johannes Erdfelt <johannes@erdfelt.com>
   35.26 - *
   35.27 - * (C) Copyright 1999 Linus Torvalds
   35.28 - * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
   35.29 - * (C) Copyright 1999 Randy Dunlap
   35.30 - * (C) Copyright 1999 Georg Acher, acher@in.tum.de
   35.31 - * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
   35.32 - * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
   35.33 - * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
   35.34 - * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
   35.35 - *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
   35.36 - * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
   35.37 - *
   35.38 - * Intel documents this fairly well, and as far as I know there
   35.39 - * are no royalties or anything like that, but even so there are
   35.40 - * people who decided that they want to do the same thing in a
   35.41 - * completely different way.
   35.42 - *
   35.43 - * WARNING! The USB documentation is downright evil. Most of it
   35.44 - * is just crap, written by a committee. You're better off ignoring
   35.45 - * most of it, the important stuff is:
   35.46 - *  - the low-level protocol (fairly simple but lots of small details)
   35.47 - *  - working around the horridness of the rest
   35.48 - */
   35.49 -
   35.50 -#include <linux/config.h>
   35.51 -#include <linux/module.h>
   35.52 -#include <linux/kernel.h>
   35.53 -#include <linux/init.h>
   35.54 -#include <linux/sched.h>
   35.55 -#include <linux/delay.h>
   35.56 -#include <linux/slab.h>
   35.57 -#include <linux/smp_lock.h>
   35.58 -#include <linux/errno.h>
   35.59 -#include <linux/interrupt.h>
   35.60 -#include <linux/spinlock.h>
   35.61 -#ifdef CONFIG_USB_DEBUG
   35.62 -#define DEBUG
   35.63 -#else
   35.64 -#undef DEBUG
   35.65 -#endif
   35.66 -#include <linux/usb.h>
   35.67 -
   35.68 -#include <asm/irq.h>
   35.69 -#include <asm/system.h>
   35.70 -
   35.71 -#include "xhci.h"
   35.72 -
   35.73 -#include "../../../../../drivers/usb/hcd.h"
   35.74 -
   35.75 -#include <asm-xen/xen-public/io/usbif.h>
   35.76 -#include <asm-xen/xen-public/io/domain_controller.h>
   35.77 -
   35.78 -/*
   35.79 - * Version Information
   35.80 - */
   35.81 -#define DRIVER_VERSION "v1.0"
   35.82 -#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, " \
   35.83 -                      "Randy Dunlap, Georg Acher, Deti Fliegl, " \
   35.84 -                      "Thomas Sailer, Roman Weissgaerber, Mark Williamson"
   35.85 -#define DRIVER_DESC "Xen Virtual USB Host Controller Interface"
   35.86 -
   35.87 -/*
   35.88 - * debug = 0, no debugging messages
   35.89 - * debug = 1, dump failed URB's except for stalls
   35.90 - * debug = 2, dump all failed URB's (including stalls)
   35.91 - */
   35.92 -#ifdef DEBUG
   35.93 -static int debug = 1;
   35.94 -#else
   35.95 -static int debug = 0;
   35.96 -#endif
   35.97 -MODULE_PARM(debug, "i");
   35.98 -MODULE_PARM_DESC(debug, "Debug level");
   35.99 -static char *errbuf;
  35.100 -#define ERRBUF_LEN    (PAGE_SIZE * 8)
  35.101 -
  35.102 -static int rh_submit_urb(struct urb *urb);
  35.103 -static int rh_unlink_urb(struct urb *urb);
  35.104 -static int xhci_unlink_urb(struct urb *urb);
  35.105 -static void xhci_call_completion(struct urb *urb);
  35.106 -static void xhci_drain_ring(void);
  35.107 -static void xhci_transfer_result(struct xhci *xhci, struct urb *urb);
  35.108 -static void xhci_finish_completion(void);
  35.109 -
  35.110 -#define MAX_URB_LOOP	2048		/* Maximum number of linked URB's */
  35.111 -
  35.112 -static kmem_cache_t *xhci_up_cachep;	/* urb_priv cache */
  35.113 -static struct xhci *xhci;               /* XHCI structure for the interface */
  35.114 -
  35.115 -/******************************************************************************
  35.116 - * DEBUGGING
  35.117 - */
  35.118 -
  35.119 -#ifdef DEBUG
  35.120 -
  35.121 -static void dump_urb(struct urb *urb)
  35.122 -{
  35.123 -    printk(KERN_DEBUG "dumping urb @ %p\n"
  35.124 -           "  hcpriv = %p\n"
  35.125 -           "  next = %p\n"
  35.126 -           "  dev = %p\n"
  35.127 -           "  pipe = 0x%lx\n"
  35.128 -           "  status = %d\n"
  35.129 -           "  transfer_flags = 0x%lx\n"
  35.130 -           "  transfer_buffer = %p\n"
  35.131 -           "  transfer_buffer_length = %d\n"
  35.132 -           "  actual_length = %d\n"
  35.133 -           "  bandwidth = %d\n"
  35.134 -           "  setup_packet = %p\n",
  35.135 -           urb, urb->hcpriv, urb->next, urb->dev, urb->pipe, urb->status,
  35.136 -           urb->transfer_flags, urb->transfer_buffer,
  35.137 -           urb->transfer_buffer_length, urb->actual_length, urb->bandwidth,
  35.138 -           urb->setup_packet);
  35.139 -    if ( urb->setup_packet != NULL )
  35.140 -        printk(KERN_DEBUG
  35.141 -               "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n",
  35.142 -               urb->setup_packet[0], urb->setup_packet[1],
  35.143 -               urb->setup_packet[2], urb->setup_packet[3],
  35.144 -               urb->setup_packet[4], urb->setup_packet[5],
  35.145 -               urb->setup_packet[6], urb->setup_packet[7]);
  35.146 -    printk(KERN_DEBUG "complete = %p\n"
  35.147 -           "interval = %d\n", urb->complete, urb->interval);
  35.148 -        
  35.149 -}
  35.150 -
  35.151 -static void xhci_show_resp(usbif_response_t *r)
  35.152 -{
  35.153 -        printk(KERN_DEBUG "dumping response @ %p\n"
  35.154 -               "  id=0x%lx\n"
  35.155 -               "  op=0x%x\n"
  35.156 -               "  data=0x%x\n"
  35.157 -               "  status=0x%x\n"
  35.158 -               "  length=0x%lx\n",
  35.159 -               r->id, r->operation, r->data, r->status, r->length);
  35.160 -}
  35.161 -
  35.162 -#define DPRINTK(...) printk(KERN_DEBUG __VA_ARGS__)
  35.163 -
  35.164 -#else /* DEBUG */
  35.165 -
  35.166 -#define dump_urb(blah) ((void)0)
  35.167 -#define xhci_show_resp(blah) ((void)0)
  35.168 -#define DPRINTK(blah,...) ((void)0)
  35.169 -
  35.170 -#endif /* DEBUG */
  35.171 -
  35.172 -/******************************************************************************
  35.173 - * RING REQUEST HANDLING
  35.174 - */
  35.175 -
  35.176 -#define RING_PLUGGED(_hc) ( RING_FULL(&_hc->usb_ring) || _hc->recovery )
  35.177 -
  35.178 -/**
  35.179 - * xhci_construct_isoc - add isochronous information to a request
  35.180 - */
  35.181 -static int xhci_construct_isoc(usbif_request_t *req, struct urb *urb)
  35.182 -{
  35.183 -        usbif_iso_t *schedule;
  35.184 -        int i;
  35.185 -        struct urb_priv *urb_priv = urb->hcpriv;
  35.186 -        
  35.187 -        req->num_iso = urb->number_of_packets;
  35.188 -        schedule = (usbif_iso_t *)__get_free_page(GFP_KERNEL);
  35.189 -
  35.190 -        if ( schedule == NULL )
  35.191 -            return -ENOMEM;
  35.192 -
  35.193 -        for ( i = 0; i < req->num_iso; i++ )
  35.194 -        {
  35.195 -                schedule[i].buffer_offset = urb->iso_frame_desc[i].offset;
  35.196 -                schedule[i].length = urb->iso_frame_desc[i].length;
  35.197 -        }
  35.198 -
  35.199 -        urb_priv->schedule = schedule;
  35.200 -	req->iso_schedule = virt_to_mfn(schedule) << PAGE_SHIFT;
  35.201 -
  35.202 -        return 0;
  35.203 -}
  35.204 -
  35.205 -/**
  35.206 - * xhci_queue_req - construct and queue request for an URB
  35.207 - */
  35.208 -static int xhci_queue_req(struct urb *urb)
  35.209 -{
  35.210 -        unsigned long flags;
  35.211 -        usbif_request_t *req;
  35.212 -        usbif_front_ring_t *usb_ring = &xhci->usb_ring;
  35.213 -
  35.214 -#ifdef DEBUG
  35.215 -        printk(KERN_DEBUG
  35.216 -               "usb_ring = %p, req_prod_pvt = %d, rsp_prod = %d, rsp_cons = %d\n",
  35.217 -               usb_ring, usb_ring->req_prod_pvt,
  35.218 -               usb_ring->sring->rsp_prod, usb_ring->rsp_cons);
  35.219 -#endif
  35.220 -        
  35.221 -        spin_lock_irqsave(&xhci->ring_lock, flags);
  35.222 -
  35.223 -        if ( RING_PLUGGED(xhci) )
  35.224 -        {
  35.225 -                printk(KERN_WARNING
  35.226 -                       "xhci_queue_req(): USB ring plugged, not queuing request\n");
  35.227 -                spin_unlock_irqrestore(&xhci->ring_lock, flags);
  35.228 -                return -ENOBUFS;
  35.229 -        }
  35.230 -
  35.231 -        /* Stick something in the shared communications ring. */
  35.232 -	req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
  35.233 -
  35.234 -        req->operation       = USBIF_OP_IO;
  35.235 -        req->port            = 0; /* We don't care what the port is. */
  35.236 -        req->id              = (unsigned long) urb->hcpriv;
  35.237 -        req->transfer_buffer = virt_to_mfn(urb->transfer_buffer) << PAGE_SHIFT;
  35.238 -	req->devnum          = usb_pipedevice(urb->pipe);
  35.239 -        req->direction       = usb_pipein(urb->pipe);
  35.240 -	req->speed           = usb_pipeslow(urb->pipe);
  35.241 -        req->pipe_type       = usb_pipetype(urb->pipe);
  35.242 -        req->length          = urb->transfer_buffer_length;
  35.243 -        req->transfer_flags  = urb->transfer_flags;
  35.244 -	req->endpoint        = usb_pipeendpoint(urb->pipe);
  35.246 -	req->timeout         = urb->timeout * (1000 / HZ);
  35.247 -
  35.248 -        if ( usb_pipetype(urb->pipe) == 0 ) /* ISO */
  35.249 -        {
  35.250 -            int ret = xhci_construct_isoc(req, urb);
  35.251 -            if ( ret != 0 ) {
  35.252 -                spin_unlock_irqrestore(&xhci->ring_lock, flags); return ret; }
  35.253 -        }
  35.254 -
  35.255 -	if(urb->setup_packet != NULL)
  35.256 -                memcpy(req->setup, urb->setup_packet, 8);
  35.257 -        else
  35.258 -                memset(req->setup, 0, 8);
  35.259 -        
  35.260 -        usb_ring->req_prod_pvt++;
  35.261 -        RING_PUSH_REQUESTS(usb_ring);
  35.262 -
  35.263 -        spin_unlock_irqrestore(&xhci->ring_lock, flags);
  35.264 -
  35.265 -	notify_via_evtchn(xhci->evtchn);
  35.266 -
  35.267 -        DPRINTK("Queued request for an URB.\n");
  35.268 -        dump_urb(urb);
  35.269 -
  35.270 -        return -EINPROGRESS;
  35.271 -}
  35.272 -
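
The request path above is the generic Xen front-ring producer sequence: check for space, claim the slot at req_prod_pvt, fill it, advance the private index, publish with RING_PUSH_REQUESTS(), then kick the backend's event channel. Condensed to just those steps (fill_request() is a hypothetical callback; the ring macros are the standard shared-ring ones used throughout this file):

    /* Minimal front-ring producer, mirroring xhci_queue_req() above. */
    static int ring_send(usbif_front_ring_t *ring, unsigned int evtchn,
                         void (*fill_request)(usbif_request_t *))
    {
        usbif_request_t *req;

        if (RING_FULL(ring))
            return -ENOBUFS;              /* caller retries later */

        req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
        fill_request(req);                /* populate the slot in place */

        ring->req_prod_pvt++;             /* advance private producer */
        RING_PUSH_REQUESTS(ring);         /* publish to the shared ring */
        notify_via_evtchn(evtchn);        /* wake the backend */
        return 0;
    }
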
  35.273 -/**
  35.274 - * xhci_queue_probe - queue a probe request for a particular port
  35.275 - */
  35.276 -static inline usbif_request_t *xhci_queue_probe(usbif_vdev_t port)
  35.277 -{
  35.278 -        usbif_request_t *req;
  35.279 -        usbif_front_ring_t *usb_ring = &xhci->usb_ring;
  35.280 -
  35.281 -#ifdef DEBUG
  35.282 -	printk(KERN_DEBUG
  35.283 -               "queuing probe: req_prod_pvt = %d, rsp_prod = %d, "
  35.284 -               "rsp_cons = %d\n",
  35.285 -               usb_ring->req_prod_pvt,
  35.286 -	       usb_ring->sring->rsp_prod, usb_ring->rsp_cons);
  35.287 -#endif
  35.288 - 
  35.289 -        /* This is always called from the timer interrupt. */
  35.290 -        spin_lock(&xhci->ring_lock);
  35.291 -       
  35.292 -        if ( RING_PLUGGED(xhci) )
  35.293 -        {
  35.294 -                printk(KERN_WARNING
  35.295 -                       "xhci_queue_probe(): ring full, not queuing request\n");
  35.296 -                spin_unlock(&xhci->ring_lock);
  35.297 -                return NULL;
  35.298 -        }
  35.299 -
  35.300 -        /* Stick something in the shared communications ring. */
  35.301 -        req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
  35.302 -
  35.303 -        memset(req, 0, sizeof(*req));
  35.304 -
  35.305 -        req->operation       = USBIF_OP_PROBE;
  35.306 -        req->port            = port;
  35.307 -
  35.308 -        usb_ring->req_prod_pvt++;
  35.309 -        RING_PUSH_REQUESTS(usb_ring);
  35.310 -
  35.311 -        spin_unlock(&xhci->ring_lock);
  35.312 -
  35.313 -	notify_via_evtchn(xhci->evtchn);
  35.314 -
  35.315 -        return req;
  35.316 -}
  35.317 -
  35.318 -/**
  35.319 - * xhci_port_reset - queue a reset request for a particular port
  35.320 - */
  35.321 -static int xhci_port_reset(usbif_vdev_t port)
  35.322 -{
  35.323 -        usbif_request_t *req;
  35.324 -        usbif_front_ring_t *usb_ring = &xhci->usb_ring;
  35.325 -
  35.326 -        /* Only ever happens from process context (hub thread). */
  35.327 -        spin_lock_irq(&xhci->ring_lock);
  35.328 -
  35.329 -        if ( RING_PLUGGED(xhci) )
  35.330 -        {
  35.331 -                printk(KERN_WARNING
  35.332 -                       "xhci_port_reset(): ring plugged, not queuing request\n");
  35.333 -                spin_unlock_irq(&xhci->ring_lock);
  35.334 -                return -ENOBUFS;
  35.335 -        }
  35.336 -
  35.337 -        /* We only reset one port at a time, so we only need one variable per
  35.338 -         * hub. */
  35.339 -        xhci->awaiting_reset = 1;
  35.340 -        
  35.341 -        /* Stick something in the shared communications ring. */
  35.342 -	req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
  35.343 -
  35.344 -        memset(req, 0, sizeof(*req));
  35.345 -
  35.346 -        req->operation       = USBIF_OP_RESET;
  35.347 -        req->port            = port;
  35.348 -        
  35.349 -        usb_ring->req_prod_pvt++;
  35.350 -	RING_PUSH_REQUESTS(usb_ring);
  35.351 -
  35.352 -        spin_unlock_irq(&xhci->ring_lock);
  35.353 -
  35.354 -	notify_via_evtchn(xhci->evtchn);
  35.355 -
  35.356 -        while ( xhci->awaiting_reset > 0 )
  35.357 -        {
  35.358 -                mdelay(1);
  35.359 -                xhci_drain_ring();
  35.360 -        }
  35.361 -
  35.362 -	xhci->rh.ports[port].pe = 1;
  35.363 -	xhci->rh.ports[port].pe_chg = 1;
  35.364 -
  35.365 -        return xhci->awaiting_reset;
  35.366 -}
  35.367 -
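
The reset path shows how this driver makes an asynchronous ring operation look synchronous: it sets a flag, queues the request, then polls, draining the ring until receive_usb_reset() overwrites the flag with the response status. Reduced to its core (queue_reset() is a hypothetical stand-in for the ring steps above):

    /* Synchronous wait over the async ring, as in xhci_port_reset(). */
    static int port_reset_wait(usbif_vdev_t port)
    {
        /* receive_usb_reset() stores resp->status into awaiting_reset:
         * 0 on success, negative on error. */
        xhci->awaiting_reset = 1;
        queue_reset(port);                /* hypothetical: ring steps above */
        while (xhci->awaiting_reset > 0) {
            mdelay(1);                    /* back off ~1ms per poll */
            xhci_drain_ring();            /* consume pending responses */
        }
        return xhci->awaiting_reset;      /* 0 or a negative status */
    }
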
  35.368 -
  35.369 -/******************************************************************************
  35.370 - * RING RESPONSE HANDLING
  35.371 - */
  35.372 -
  35.373 -static void receive_usb_reset(usbif_response_t *resp)
  35.374 -{
  35.375 -    xhci->awaiting_reset = resp->status;
  35.376 -    rmb();
  35.377 -    
  35.378 -}
  35.379 -
  35.380 -static void receive_usb_probe(usbif_response_t *resp)
  35.381 -{
  35.382 -    spin_lock(&xhci->rh.port_state_lock);
  35.383 -
  35.384 -    if ( resp->status >= 0 )
  35.385 -    {
  35.386 -        if ( resp->status == 1 )
  35.387 -        {
  35.388 -            /* If there's a device there and there wasn't one before, there
  35.389 -             * must have been a connection status change. */
  35.390 -            if( xhci->rh.ports[resp->data].cs == 0 )
  35.391 -	    {
  35.392 -                xhci->rh.ports[resp->data].cs = 1;
  35.393 -                xhci->rh.ports[resp->data].cs_chg = 1;
  35.394 -	    }
  35.395 -        }
  35.396 -        else if ( resp->status == 0 )
  35.397 -        {
  35.398 -            if(xhci->rh.ports[resp->data].cs == 1 )
  35.399 -            {
  35.400 -                xhci->rh.ports[resp->data].cs  = 0;
  35.401 -                xhci->rh.ports[resp->data].cs_chg = 1;
  35.402 -		xhci->rh.ports[resp->data].pe = 0;
  35.403 -		/* According to USB Spec v2.0, 11.24.2.7.2.2, we don't need
  35.404 -		 * to set pe_chg since an error has not occurred. */
  35.405 -            }
  35.406 -        }
  35.407 -        else
  35.408 -            printk(KERN_WARNING "receive_usb_probe(): unexpected status %d "
  35.409 -                   "for port %d\n", resp->status, resp->data);
  35.410 -    }
  35.411 -    else if ( resp->status < 0)
  35.412 -        printk(KERN_WARNING "receive_usb_probe(): got error status %d\n",
  35.413 -               resp->status);
  35.414 -
  35.415 -    spin_unlock(&xhci->rh.port_state_lock);
  35.416 -}
  35.417 -
  35.418 -static void receive_usb_io(usbif_response_t *resp)
  35.419 -{
  35.420 -        struct urb_priv *urbp = (struct urb_priv *)resp->id;
  35.421 -        struct urb *urb = urbp->urb;
  35.422 -
  35.423 -        urb->actual_length = resp->length;
  35.424 -        urbp->in_progress = 0;
  35.425 -
  35.426 -        if( usb_pipetype(urb->pipe) == 0 ) /* ISO */
  35.427 -        {
  35.428 -                int i;
  35.429 -              
  35.430 -                /* Copy ISO schedule results back in. */
  35.431 -                for ( i = 0; i < urb->number_of_packets; i++ )
  35.432 -                {
  35.433 -                        urb->iso_frame_desc[i].status
  35.434 -                                = urbp->schedule[i].status;
  35.435 -                        urb->iso_frame_desc[i].actual_length
  35.436 -                                = urbp->schedule[i].length;
  35.437 -                }
  35.438 -                free_page((unsigned long)urbp->schedule);
  35.439 -        }
  35.440 -
  35.441 -        /* Only set status if it's not been changed since submission.  It might
  35.442 -         * have been changed if the URB has been unlinked asynchronously, for
  35.443 -         * instance. */
  35.444 -	if ( urb->status == -EINPROGRESS )
  35.445 -                urbp->status = urb->status = resp->status;
  35.446 -}
  35.447 -
  35.448 -/**
  35.449 - * xhci_drain_ring - drain responses from the ring, calling handlers
  35.450 - *
  35.451 - * This may be called from interrupt context when an event is received from the
  35.452 - * backend domain, or sometimes in process context whilst waiting for a port
  35.453 - * reset or URB completion.
  35.454 - */
  35.455 -static void xhci_drain_ring(void)
  35.456 -{
  35.457 -	struct list_head *tmp, *head;
  35.458 -	usbif_front_ring_t *usb_ring = &xhci->usb_ring;
  35.459 -	usbif_response_t *resp;
  35.460 -        RING_IDX i, rp;
  35.461 -
  35.462 -        /* Walk the ring here to get responses, updating URBs to show what
  35.463 -         * completed. */
  35.464 -        
  35.465 -        rp = usb_ring->sring->rsp_prod;
  35.466 -        rmb(); /* Ensure we see queued responses up to 'rp'. */
  35.467 -
  35.468 -        /* Take items off the comms ring, taking care not to overflow. */
  35.469 -        for ( i = usb_ring->rsp_cons; i != rp; i++ )
  35.470 -        {
  35.471 -            resp = RING_GET_RESPONSE(usb_ring, i);
  35.472 -            
  35.473 -            /* May need to deal with batching and with putting a ceiling on
  35.474 -               the number dispatched for performance and anti-DoS reasons. */
  35.475 -
  35.476 -            xhci_show_resp(resp);
  35.477 -
  35.478 -            switch ( resp->operation )
  35.479 -            {
  35.480 -            case USBIF_OP_PROBE:
  35.481 -                receive_usb_probe(resp);
  35.482 -                break;
  35.483 -                
  35.484 -            case USBIF_OP_IO:
  35.485 -                receive_usb_io(resp);
  35.486 -                break;
  35.487 -
  35.488 -            case USBIF_OP_RESET:
  35.489 -                receive_usb_reset(resp);
  35.490 -                break;
  35.491 -
  35.492 -            default:
  35.493 -                printk(KERN_WARNING
  35.494 -                       "error: unknown USB io operation response [%d]\n",
  35.495 -                       resp->operation);
  35.496 -                break;
  35.497 -            }
  35.498 -        }
  35.499 -
  35.500 -        usb_ring->rsp_cons = i;
  35.501 -
  35.502 -	/* Walk the list of pending URB's to see which ones completed and do
  35.503 -         * callbacks, etc. */
  35.504 -	spin_lock(&xhci->urb_list_lock);
  35.505 -	head = &xhci->urb_list;
  35.506 -	tmp = head->next;
  35.507 -	while (tmp != head) {
  35.508 -		struct urb *urb = list_entry(tmp, struct urb, urb_list);
  35.509 -
  35.510 -		tmp = tmp->next;
  35.511 -
  35.512 -		/* Checks the status and does all of the magic necessary */
  35.513 -		xhci_transfer_result(xhci, urb);
  35.514 -	}
  35.515 -	spin_unlock(&xhci->urb_list_lock);
  35.516 -
  35.517 -	xhci_finish_completion();
  35.518 -}
  35.519 -
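
The drain routine is the matching front-ring consumer: snapshot the shared rsp_prod, issue a read barrier, walk rsp_cons up to the snapshot dispatching each response, then record how far consumption got. The skeleton, with the dispatch switch abstracted into a hypothetical handle() callback:

    /* Minimal front-ring consumer, mirroring xhci_drain_ring() above. */
    static void ring_drain(usbif_front_ring_t *ring,
                           void (*handle)(usbif_response_t *))
    {
        RING_IDX i, rp = ring->sring->rsp_prod;  /* snapshot producer */

        rmb();  /* read responses only after seeing the producer index */

        for (i = ring->rsp_cons; i != rp; i++)
            handle(RING_GET_RESPONSE(ring, i));

        ring->rsp_cons = i;                      /* record consumption */
    }
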
  35.520 -
  35.521 -static void xhci_interrupt(int irq, void *__xhci, struct pt_regs *regs)
  35.522 -{
  35.523 -        xhci_drain_ring();
  35.524 -}
  35.525 -
  35.526 -/******************************************************************************
  35.527 - * HOST CONTROLLER FUNCTIONALITY
  35.528 - */
  35.529 -
  35.530 -/**
  35.531 - * no-op implementation of private device alloc / free routines
  35.532 - */
  35.533 -static int xhci_do_nothing_dev(struct usb_device *dev)
  35.534 -{
  35.535 -	return 0;
  35.536 -}
  35.537 -
  35.538 -static inline void xhci_add_complete(struct urb *urb)
  35.539 -{
  35.540 -	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
  35.541 -	unsigned long flags;
  35.542 -
  35.543 -	spin_lock_irqsave(&xhci->complete_list_lock, flags);
  35.544 -	list_add_tail(&urbp->complete_list, &xhci->complete_list);
  35.545 -	spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
  35.546 -}
  35.547 -
  35.548 -/* When this returns, the owner of the URB may free its
  35.549 - * storage.
  35.550 - *
  35.551 - * We spin and wait for the URB to complete before returning.
  35.552 - *
  35.553 - * Call with urb->lock acquired.
  35.554 - */
  35.555 -static void xhci_delete_urb(struct urb *urb)
  35.556 -{
  35.557 -        struct urb_priv *urbp;
  35.558 -
  35.559 -	urbp = urb->hcpriv;
  35.560 -
  35.561 -        /* If there's no urb_priv structure for this URB then it can't have
  35.562 -         * been submitted at all. */
  35.563 -	if ( urbp == NULL )
  35.564 -		return;
  35.565 -
  35.566 -	/* For now we just spin until the URB completes.  It shouldn't take too
  35.567 -         * long and we don't expect to have to do this very often. */
  35.568 -	while ( urb->status == -EINPROGRESS )
  35.569 -        {
  35.570 -            xhci_drain_ring();
  35.571 -            mdelay(1);
  35.572 -        }
  35.573 -
  35.574 -	/* Now we know that further transfers to the buffer won't
  35.575 -	 * occur, so we can safely return. */
  35.576 -}
  35.577 -
  35.578 -static struct urb_priv *xhci_alloc_urb_priv(struct urb *urb)
  35.579 -{
  35.580 -	struct urb_priv *urbp;
  35.581 -
  35.582 -	urbp = kmem_cache_alloc(xhci_up_cachep, SLAB_ATOMIC);
  35.583 -	if (!urbp) {
  35.584 -		err("xhci_alloc_urb_priv: couldn't allocate memory for urb_priv");
  35.585 -		return NULL;
  35.586 -	}
  35.587 -
  35.588 -	memset((void *)urbp, 0, sizeof(*urbp));
  35.589 -
  35.590 -	urbp->inserttime = jiffies;
  35.591 -	urbp->urb = urb;
  35.592 -	urbp->dev = urb->dev;
  35.593 -	
  35.594 -	INIT_LIST_HEAD(&urbp->complete_list);
  35.595 -
  35.596 -	urb->hcpriv = urbp;
  35.597 -
  35.598 -	return urbp;
  35.599 -}
  35.600 -
  35.601 -/*
  35.602 - * MUST be called with urb->lock acquired
  35.603 - */
  35.604 -/* When is this called?  Do we need to stop the transfer (as we
  35.605 - * currently do)? */
  35.606 -static void xhci_destroy_urb_priv(struct urb *urb)
  35.607 -{
  35.608 -    struct urb_priv *urbp;
  35.609 -    
  35.610 -    urbp = (struct urb_priv *)urb->hcpriv;
  35.611 -    if (!urbp)
  35.612 -        return;
  35.613 -
  35.614 -    if (!list_empty(&urb->urb_list))
  35.615 -        warn("xhci_destroy_urb_priv: urb %p still on xhci->urb_list", urb);
  35.616 -    
  35.617 -    if (!list_empty(&urbp->complete_list))
  35.618 -        warn("xhci_destroy_urb_priv: urb %p still on xhci->complete_list", urb);
  35.619 -    
  35.620 -    kmem_cache_free(xhci_up_cachep, urb->hcpriv);
  35.621 -
  35.622 -    urb->hcpriv = NULL;
  35.623 -}
  35.624 -
  35.625 -/**
  35.626 - * Try to find URBs in progress on the same pipe to the same device.
  35.627 - *
  35.628 - * MUST be called with xhci->urb_list_lock acquired
  35.629 - */
  35.630 -static struct urb *xhci_find_urb_ep(struct xhci *xhci, struct urb *urb)
  35.631 -{
  35.632 -	struct list_head *tmp, *head;
  35.633 -
  35.634 -	/* We don't match Isoc transfers since they are special */
  35.635 -	if (usb_pipeisoc(urb->pipe))
  35.636 -		return NULL;
  35.637 -
  35.638 -	head = &xhci->urb_list;
  35.639 -	tmp = head->next;
  35.640 -	while (tmp != head) {
  35.641 -		struct urb *u = list_entry(tmp, struct urb, urb_list);
  35.642 -
  35.643 -		tmp = tmp->next;
  35.644 -
  35.645 -		if (u->dev == urb->dev && u->pipe == urb->pipe &&
  35.646 -		    u->status == -EINPROGRESS)
  35.647 -			return u;
  35.648 -	}
  35.649 -
  35.650 -	return NULL;
  35.651 -}
  35.652 -
  35.653 -static int xhci_submit_urb(struct urb *urb)
  35.654 -{
  35.655 -	int ret = -EINVAL;
  35.656 -	unsigned long flags;
  35.657 -	struct urb *eurb;
  35.658 -	int bustime;
  35.659 -
  35.660 -        DPRINTK("URB submitted to XHCI driver.\n");
  35.661 -        dump_urb(urb);
  35.662 -
  35.663 -	if (!urb)
  35.664 -		return -EINVAL;
  35.665 -
  35.666 -	if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv) {
  35.667 -		warn("xhci_submit_urb: urb %p belongs to disconnected device or bus?", urb);
  35.668 -		return -ENODEV;
  35.669 -	}
  35.670 -
  35.671 -        if ( urb->dev->devpath == NULL )
  35.672 -                BUG();
  35.673 -
  35.674 -	usb_inc_dev_use(urb->dev);
  35.675 -
  35.676 -	spin_lock_irqsave(&xhci->urb_list_lock, flags);
  35.677 -	spin_lock(&urb->lock);
  35.678 -
  35.679 -	if (urb->status == -EINPROGRESS || urb->status == -ECONNRESET ||
  35.680 -	    urb->status == -ECONNABORTED) {
  35.681 -		dbg("xhci_submit_urb: urb not available to submit (status = %d)", urb->status);
  35.682 -		/* Since we can have problems on the out path */
  35.683 -		spin_unlock(&urb->lock);
  35.684 -		spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
  35.685 -		usb_dec_dev_use(urb->dev);
  35.686 -
  35.687 -		return ret;
  35.688 -	}
  35.689 -
  35.690 -	INIT_LIST_HEAD(&urb->urb_list);
  35.691 -	if (!xhci_alloc_urb_priv(urb)) {
  35.692 -		ret = -ENOMEM;
  35.693 -
  35.694 -		goto out;
  35.695 -	}
  35.696 -
  35.697 -        ( (struct urb_priv *)urb->hcpriv )->in_progress = 1;
  35.698 -
  35.699 -	eurb = xhci_find_urb_ep(xhci, urb);
  35.700 -	if (eurb && !(urb->transfer_flags & USB_QUEUE_BULK)) {
  35.701 -		ret = -ENXIO;
  35.702 -
  35.703 -		goto out;
  35.704 -	}
  35.705 -
  35.706 -	/* Short circuit the virtual root hub */
  35.707 -	if (urb->dev == xhci->rh.dev) {
  35.708 -		ret = rh_submit_urb(urb);
  35.709 -
  35.710 -		goto out;
  35.711 -	}
  35.712 -
  35.713 -	switch (usb_pipetype(urb->pipe)) {
  35.714 -	case PIPE_CONTROL:
  35.715 -	case PIPE_BULK:
  35.716 -		ret = xhci_queue_req(urb);
  35.717 -		break;
  35.718 -
  35.719 -	case PIPE_INTERRUPT:
  35.720 -		if (urb->bandwidth == 0) {	/* not yet checked/allocated */
  35.721 -			bustime = usb_check_bandwidth(urb->dev, urb);
  35.722 -			if (bustime < 0)
  35.723 -				ret = bustime;
  35.724 -			else {
  35.725 -				ret = xhci_queue_req(urb);
  35.726 -				if (ret == -EINPROGRESS)
  35.727 -					usb_claim_bandwidth(urb->dev, urb,
  35.728 -                                                            bustime, 0);
  35.729 -			}
  35.730 -		} else		/* bandwidth is already set */
  35.731 -			ret = xhci_queue_req(urb);
  35.732 -		break;
  35.733 -
  35.734 -	case PIPE_ISOCHRONOUS:
  35.735 -		if (urb->bandwidth == 0) {	/* not yet checked/allocated */
  35.736 -			if (urb->number_of_packets <= 0) {
  35.737 -				ret = -EINVAL;
  35.738 -				break;
  35.739 -			}
  35.740 -			bustime = usb_check_bandwidth(urb->dev, urb);
  35.741 -			if (bustime < 0) {
  35.742 -				ret = bustime;
  35.743 -				break;
  35.744 -			}
  35.745 -
  35.746 -			ret = xhci_queue_req(urb);
  35.747 -			if (ret == -EINPROGRESS)
  35.748 -				usb_claim_bandwidth(urb->dev, urb, bustime, 1);
  35.749 -		} else		/* bandwidth is already set */
  35.750 -			ret = xhci_queue_req(urb);
  35.751 -		break;
  35.752 -	}
  35.753 -out:
  35.754 -	urb->status = ret;
  35.755 -
  35.756 -	if (ret == -EINPROGRESS) {
  35.757 -		/* We use _tail to make find_urb_ep more efficient */
  35.758 -		list_add_tail(&urb->urb_list, &xhci->urb_list);
  35.759 -
  35.760 -		spin_unlock(&urb->lock);
  35.761 -		spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
  35.762 -
  35.763 -		return 0;
  35.764 -	}
  35.765 -
  35.766 -	xhci_delete_urb(urb);
  35.767 -
  35.768 -	spin_unlock(&urb->lock);
  35.769 -	spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
  35.770 -
  35.771 -	/* Only call completion if it was successful */
  35.772 -	if (!ret)
  35.773 -		xhci_call_completion(urb);
  35.774 -
  35.775 -	return ret;
  35.776 -}
  35.777 -
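
For the two periodic pipe types, submission above is gated on bus bandwidth: usb_check_bandwidth() prices the transfer (negative errno if it does not fit), and usb_claim_bandwidth() records the claim only once the request is actually queued. The shared shape of both switch arms, condensed into one hypothetical helper (isoc is 1 for isochronous, 0 for interrupt):

    /* Bandwidth gating for periodic transfers, as in xhci_submit_urb(). */
    static int submit_periodic(struct urb *urb, int isoc)
    {
        int ret, bustime;

        if (urb->bandwidth != 0)                 /* claim already held */
            return xhci_queue_req(urb);

        bustime = usb_check_bandwidth(urb->dev, urb);
        if (bustime < 0)
            return bustime;                      /* no room on the bus */

        ret = xhci_queue_req(urb);
        if (ret == -EINPROGRESS)                 /* queued: record claim */
            usb_claim_bandwidth(urb->dev, urb, bustime, isoc);
        return ret;
    }
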
  35.778 -/*
  35.779 - * Return the result of a transfer
  35.780 - *
  35.781 - * MUST be called with urb_list_lock acquired
  35.782 - */
  35.783 -static void xhci_transfer_result(struct xhci *xhci, struct urb *urb)
  35.784 -{
  35.785 -	int ret = 0;
  35.786 -	unsigned long flags;
  35.787 -	struct urb_priv *urbp;
  35.788 -
  35.789 -	/* The root hub is special */
  35.790 -	if (urb->dev == xhci->rh.dev)
  35.791 -		return;
  35.792 -
  35.793 -	spin_lock_irqsave(&urb->lock, flags);
  35.794 -
  35.795 -	urbp = (struct urb_priv *)urb->hcpriv;
  35.796 -
  35.797 -        if ( ( (struct urb_priv *)urb->hcpriv )->in_progress )
  35.798 -                ret = -EINPROGRESS;
  35.799 -
  35.800 -        if (urb->actual_length < urb->transfer_buffer_length) {
  35.801 -                if (urb->transfer_flags & USB_DISABLE_SPD) {
  35.802 -                        ret = -EREMOTEIO;
  35.803 -                }
  35.804 -        }
  35.805 -
  35.806 -	if (urb->status == -EPIPE)
  35.807 -        {
  35.808 -                ret = urb->status;
  35.809 -		/* endpoint has stalled - mark it halted */
  35.810 -		usb_endpoint_halt(urb->dev, usb_pipeendpoint(urb->pipe),
  35.811 -                                  usb_pipeout(urb->pipe));
  35.812 -        }
  35.813 -
  35.814 -	if ((debug == 1 && ret != 0 && ret != -EPIPE) ||
  35.815 -            (ret != 0 && debug > 1)) {
  35.816 -		/* Some debugging code */
  35.817 -		dbg("xhci_result_interrupt/bulk() failed with status %x",
  35.818 -			ret);
  35.819 -	}
  35.820 -
  35.821 -	if (ret == -EINPROGRESS)
  35.822 -		goto out;
  35.823 -
  35.824 -	switch (usb_pipetype(urb->pipe)) {
  35.825 -	case PIPE_CONTROL:
  35.826 -	case PIPE_BULK:
  35.827 -	case PIPE_ISOCHRONOUS:
  35.828 -		/* Release bandwidth for Interrupt or Isoc. transfers */
  35.829 -		/* Spinlock needed ? */
  35.830 -		if (urb->bandwidth)
  35.831 -			usb_release_bandwidth(urb->dev, urb, 1);
  35.832 -		xhci_delete_urb(urb);
  35.833 -		break;
  35.834 -	case PIPE_INTERRUPT:
  35.835 -		/* Interrupts are an exception */
  35.836 -		if (urb->interval)
  35.837 -			goto out_complete;
  35.838 -
  35.839 -		/* Release bandwidth for Interrupt or Isoc. transfers */
  35.840 -		/* Spinlock needed ? */
  35.841 -		if (urb->bandwidth)
  35.842 -			usb_release_bandwidth(urb->dev, urb, 0);
  35.843 -		xhci_delete_urb(urb);
  35.844 -		break;
  35.845 -	default:
  35.846 -		info("xhci_transfer_result: unknown pipe type %d for urb %p\n",
  35.847 -                     usb_pipetype(urb->pipe), urb);
  35.848 -	}
  35.849 -
  35.850 -	/* Remove it from xhci->urb_list */
  35.851 -	list_del_init(&urb->urb_list);
  35.852 -
  35.853 -out_complete:
  35.854 -	xhci_add_complete(urb);
  35.855 -
  35.856 -out:
  35.857 -	spin_unlock_irqrestore(&urb->lock, flags);
  35.858 -}
  35.859 -
  35.860 -static int xhci_unlink_urb(struct urb *urb)
  35.861 -{
  35.862 -	unsigned long flags;
  35.863 -	struct urb_priv *urbp = urb->hcpriv;
  35.864 -
  35.865 -	if (!urb)
  35.866 -		return -EINVAL;
  35.867 -
  35.868 -	if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv)
  35.869 -		return -ENODEV;
  35.870 -
  35.871 -	spin_lock_irqsave(&xhci->urb_list_lock, flags);
  35.872 -	spin_lock(&urb->lock);
  35.873 -
  35.874 -	/* Release bandwidth for Interrupt or Isoc. transfers */
  35.875 -	/* Spinlock needed ? */
  35.876 -	if (urb->bandwidth) {
  35.877 -		switch (usb_pipetype(urb->pipe)) {
  35.878 -		case PIPE_INTERRUPT:
  35.879 -			usb_release_bandwidth(urb->dev, urb, 0);
  35.880 -			break;
  35.881 -		case PIPE_ISOCHRONOUS:
  35.882 -			usb_release_bandwidth(urb->dev, urb, 1);
  35.883 -			break;
  35.884 -		default:
  35.885 -			break;
  35.886 -		}
  35.887 -	}
  35.888 -
  35.889 -	if (urb->status != -EINPROGRESS) {
  35.890 -		spin_unlock(&urb->lock);
  35.891 -		spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
  35.892 -		return 0;
  35.893 -	}
  35.894 -
  35.895 -	list_del_init(&urb->urb_list);
  35.896 -
  35.897 -	/* Short circuit the virtual root hub */
  35.898 -	if (urb->dev == xhci->rh.dev) {
  35.899 -		rh_unlink_urb(urb);
  35.900 -
  35.901 -		spin_unlock(&urb->lock);
  35.902 -		spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
  35.903 -
  35.904 -		xhci_call_completion(urb);
  35.905 -	} else {
  35.906 -		if (urb->transfer_flags & USB_ASYNC_UNLINK) {
  35.907 -                        /* We don't currently attempt to cancel URBs
  35.908 -                         * that have been queued in the ring.  We handle async
  35.909 -                         * unlinked URBs when they complete. */
  35.910 -			urbp->status = urb->status = -ECONNABORTED;
  35.911 -			spin_unlock(&urb->lock);
  35.912 -			spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
  35.913 -		} else {
  35.914 -			urb->status = -ENOENT;
  35.915 -
  35.916 -			spin_unlock(&urb->lock);
  35.917 -			spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
  35.918 -
  35.919 -			if (in_interrupt()) {	/* wait at least 1 frame */
  35.920 -				static int errorcount = 10;
  35.921 -
  35.922 -				if (errorcount--)
  35.923 -					dbg("xhci_unlink_urb called from interrupt for urb %p", urb);
  35.924 -				udelay(1000);
  35.925 -			} else
  35.926 -				schedule_timeout(1+1*HZ/1000); 
  35.927 -
  35.928 -                        xhci_delete_urb(urb);
  35.929 -
  35.930 -			xhci_call_completion(urb);
  35.931 -		}
  35.932 -	}
  35.933 -
  35.934 -	return 0;
  35.935 -}
  35.936 -
  35.937 -static void xhci_call_completion(struct urb *urb)
  35.938 -{
  35.939 -	struct urb_priv *urbp;
  35.940 -	struct usb_device *dev = urb->dev;
  35.941 -	int is_ring = 0, killed, resubmit_interrupt, status;
  35.942 -	struct urb *nurb;
  35.943 -	unsigned long flags;
  35.944 -
  35.945 -	spin_lock_irqsave(&urb->lock, flags);
  35.946 -
  35.947 -	urbp = (struct urb_priv *)urb->hcpriv;
  35.948 -	if (!urbp || !urb->dev) {
  35.949 -		spin_unlock_irqrestore(&urb->lock, flags);
  35.950 -		return;
  35.951 -	}
  35.952 -
  35.953 -	killed = (urb->status == -ENOENT || urb->status == -ECONNABORTED ||
  35.954 -			urb->status == -ECONNRESET);
  35.955 -	resubmit_interrupt = (usb_pipetype(urb->pipe) == PIPE_INTERRUPT &&
  35.956 -			urb->interval);
  35.957 -
  35.958 -	nurb = urb->next;
  35.959 -	if (nurb && !killed) {
  35.960 -		int count = 0;
  35.961 -
  35.962 -		while (nurb && nurb != urb && count < MAX_URB_LOOP) {
  35.963 -			if (nurb->status == -ENOENT ||
  35.964 -			    nurb->status == -ECONNABORTED ||
  35.965 -			    nurb->status == -ECONNRESET) {
  35.966 -				killed = 1;
  35.967 -				break;
  35.968 -			}
  35.969 -
  35.970 -			nurb = nurb->next;
  35.971 -			count++;
  35.972 -		}
  35.973 -
  35.974 -		if (count == MAX_URB_LOOP)
  35.975 -			err("xhci_call_completion: too many linked URB's, loop? (first loop)");
  35.976 -
  35.977 -		/* Check to see if chain is a ring */
  35.978 -		is_ring = (nurb == urb);
  35.979 -	}
  35.980 -
  35.981 -	status = urbp->status;
  35.982 -	if (!resubmit_interrupt || killed)
  35.983 -		/* We don't need urb_priv anymore */
  35.984 -		xhci_destroy_urb_priv(urb);
  35.985 -
  35.986 -	if (!killed)
  35.987 -		urb->status = status;
  35.988 -
  35.989 -	spin_unlock_irqrestore(&urb->lock, flags);
  35.990 -
  35.991 -	if (urb->complete)
  35.992 -		urb->complete(urb);
  35.993 -
  35.994 -	if (resubmit_interrupt)
  35.995 -		/* Recheck the status. The completion handler may have */
  35.996 -		/*  unlinked the resubmitting interrupt URB */
  35.997 -		killed = (urb->status == -ENOENT ||
  35.998 -			  urb->status == -ECONNABORTED ||
  35.999 -			  urb->status == -ECONNRESET);
 35.1000 -
 35.1001 -	if (resubmit_interrupt && !killed) {
 35.1002 -                if ( urb->dev != xhci->rh.dev )
 35.1003 -                        xhci_queue_req(urb); /* XXX What if this fails? */
 35.1004 -                /* Don't need to resubmit URBs for the virtual root dev. */
 35.1005 -	} else {
 35.1006 -		if (is_ring && !killed) {
 35.1007 -			urb->dev = dev;
 35.1008 -			xhci_submit_urb(urb);
 35.1009 -		} else {
 35.1010 -			/* We decrement the usage count after we're done */
 35.1011 -			/*  with everything */
 35.1012 -			usb_dec_dev_use(dev);
 35.1013 -		}
 35.1014 -	}
 35.1015 -}
 35.1016 -
 35.1017 -static void xhci_finish_completion(void)
 35.1018 -{
 35.1019 -	struct list_head *tmp, *head;
 35.1020 -	unsigned long flags;
 35.1021 -
 35.1022 -	spin_lock_irqsave(&xhci->complete_list_lock, flags);
 35.1023 -	head = &xhci->complete_list;
 35.1024 -	tmp = head->next;
 35.1025 -	while (tmp != head) {
 35.1026 -		struct urb_priv *urbp = list_entry(tmp, struct urb_priv,
 35.1027 -                                                   complete_list);
 35.1028 -		struct urb *urb = urbp->urb;
 35.1029 -
 35.1030 -		list_del_init(&urbp->complete_list);
 35.1031 -		spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
 35.1032 -
 35.1033 -		xhci_call_completion(urb);
 35.1034 -
 35.1035 -		spin_lock_irqsave(&xhci->complete_list_lock, flags);
 35.1036 -		head = &xhci->complete_list;
 35.1037 -		tmp = head->next;
 35.1038 -	}
 35.1039 -	spin_unlock_irqrestore(&xhci->complete_list_lock, flags);
 35.1040 -}
 35.1041 -
 35.1042 -static struct usb_operations xhci_device_operations = {
 35.1043 -	.allocate = xhci_do_nothing_dev,
 35.1044 -	.deallocate = xhci_do_nothing_dev,
 35.1045 -        /* It doesn't look like any drivers actually care what the frame number
 35.1046 -	 * is at the moment!  If necessary, we could approximate the current
 35.1047 -	 * frame number by passing it from the backend in response messages. */
 35.1048 -	.get_frame_number = NULL,
 35.1049 -	.submit_urb = xhci_submit_urb,
 35.1050 -	.unlink_urb = xhci_unlink_urb
 35.1051 -};
 35.1052 -
 35.1053 -/******************************************************************************
 35.1054 - * VIRTUAL ROOT HUB EMULATION
 35.1055 - */
 35.1056 -
 35.1057 -static __u8 root_hub_dev_des[] =
 35.1058 -{
 35.1059 - 	0x12,			/*  __u8  bLength; */
 35.1060 -	0x01,			/*  __u8  bDescriptorType; Device */
 35.1061 -	0x00,			/*  __u16 bcdUSB; v1.0 */
 35.1062 -	0x01,
 35.1063 -	0x09,			/*  __u8  bDeviceClass; HUB_CLASSCODE */
 35.1064 -	0x00,			/*  __u8  bDeviceSubClass; */
 35.1065 -	0x00,			/*  __u8  bDeviceProtocol; */
 35.1066 -	0x08,			/*  __u8  bMaxPacketSize0; 8 Bytes */
 35.1067 -	0x00,			/*  __u16 idVendor; */
 35.1068 -	0x00,
 35.1069 -	0x00,			/*  __u16 idProduct; */
 35.1070 -	0x00,
 35.1071 -	0x00,			/*  __u16 bcdDevice; */
 35.1072 -	0x00,
 35.1073 -	0x00,			/*  __u8  iManufacturer; */
 35.1074 -	0x02,			/*  __u8  iProduct; */
 35.1075 -	0x01,			/*  __u8  iSerialNumber; */
 35.1076 -	0x01			/*  __u8  bNumConfigurations; */
 35.1077 -};
 35.1078 -
 35.1079 -
 35.1080 -/* Configuration descriptor */
 35.1081 -static __u8 root_hub_config_des[] =
 35.1082 -{
 35.1083 -	0x09,			/*  __u8  bLength; */
 35.1084 -	0x02,			/*  __u8  bDescriptorType; Configuration */
 35.1085 -	0x19,			/*  __u16 wTotalLength; */
 35.1086 -	0x00,
 35.1087 -	0x01,			/*  __u8  bNumInterfaces; */
 35.1088 -	0x01,			/*  __u8  bConfigurationValue; */
 35.1089 -	0x00,			/*  __u8  iConfiguration; */
 35.1090 -	0x40,			/*  __u8  bmAttributes;
 35.1091 -					Bit 7: Bus-powered, 6: Self-powered,
 35.1092 -					Bit 5 Remote-wakeup, 4..0: resvd */
 35.1093 -	0x00,			/*  __u8  MaxPower; */
 35.1094 -
 35.1095 -	/* interface */
 35.1096 -	0x09,			/*  __u8  if_bLength; */
 35.1097 -	0x04,			/*  __u8  if_bDescriptorType; Interface */
 35.1098 -	0x00,			/*  __u8  if_bInterfaceNumber; */
 35.1099 -	0x00,			/*  __u8  if_bAlternateSetting; */
 35.1100 -	0x01,			/*  __u8  if_bNumEndpoints; */
 35.1101 -	0x09,			/*  __u8  if_bInterfaceClass; HUB_CLASSCODE */
 35.1102 -	0x00,			/*  __u8  if_bInterfaceSubClass; */
 35.1103 -	0x00,			/*  __u8  if_bInterfaceProtocol; */
 35.1104 -	0x00,			/*  __u8  if_iInterface; */
 35.1105 -
 35.1106 -	/* endpoint */
 35.1107 -	0x07,			/*  __u8  ep_bLength; */
 35.1108 -	0x05,			/*  __u8  ep_bDescriptorType; Endpoint */
 35.1109 -	0x81,			/*  __u8  ep_bEndpointAddress; IN Endpoint 1 */
 35.1110 -	0x03,			/*  __u8  ep_bmAttributes; Interrupt */
 35.1111 -	0x08,			/*  __u16 ep_wMaxPacketSize; 8 Bytes */
 35.1112 -	0x00,
 35.1113 -	0xff			/*  __u8  ep_bInterval; 255 ms */
 35.1114 -};
 35.1115 -
 35.1116 -static __u8 root_hub_hub_des[] =
 35.1117 -{
 35.1118 -	0x09,			/*  __u8  bLength; */
 35.1119 -	0x29,			/*  __u8  bDescriptorType; Hub-descriptor */
 35.1120 -	0x02,			/*  __u8  bNbrPorts; */
 35.1121 -	0x00,			/* __u16  wHubCharacteristics; */
 35.1122 -	0x00,
 35.1123 -	0x01,			/*  __u8  bPwrOn2pwrGood; 2ms */
 35.1124 -	0x00,			/*  __u8  bHubContrCurrent; 0 mA */
 35.1125 -	0x00,			/*  __u8  DeviceRemovable; *** 7 Ports max *** */
 35.1126 -	0xff			/*  __u8  PortPwrCtrlMask; *** 7 ports max *** */
 35.1127 -};
 35.1128 -
 35.1129 -/* prepare Interrupt pipe transaction data; HUB INTERRUPT ENDPOINT */
 35.1130 -static int rh_send_irq(struct urb *urb)
 35.1131 -{
 35.1132 -	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
 35.1133 -        xhci_port_t *ports = xhci->rh.ports;
 35.1134 -	unsigned long flags;
 35.1135 -	int i, len = 1;
 35.1136 -	__u16 data = 0;
 35.1137 -
 35.1138 -	spin_lock_irqsave(&urb->lock, flags);
 35.1139 -	for (i = 0; i < xhci->rh.numports; i++) {
 35.1140 -                /* Set a bit if anything at all has changed on the port, as per
 35.1141 -		 * USB spec 11.12 */
 35.1142 -		data |= (ports[i].cs_chg || ports[i].pe_chg )
 35.1143 -                        ? (1 << (i + 1))
 35.1144 -                        : 0;
 35.1145 -
 35.1146 -		len = (i + 1) / 8 + 1;
 35.1147 -	}
 35.1148 -
 35.1149 -	*(__u16 *) urb->transfer_buffer = cpu_to_le16(data);
 35.1150 -	urb->actual_length = len;
 35.1151 -	urbp->status = 0;
 35.1152 -
 35.1153 -	spin_unlock_irqrestore(&urb->lock, flags);
 35.1154 -
 35.1155 -	if ((data > 0) && (xhci->rh.send != 0)) {
 35.1156 -		dbg("root-hub INT complete: data: %x", data);
 35.1157 -		xhci_call_completion(urb);
 35.1158 -	}
 35.1159 -
 35.1160 -	return 0;
 35.1161 -}
 35.1162 -
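
The interrupt payload built above is the standard USB hub status-change bitmap (USB 2.0 spec, 11.12.4): bit N+1 is set when anything changed on port N, bit 0 is reserved for hub-level changes, and the length is the number of bits rounded up to whole bytes. A worked sketch (port_changed() is a hypothetical predicate for the cs_chg/pe_chg test):

    /* Build the hub status-change bitmap, as rh_send_irq() does above. */
    static __u16 hub_change_bitmap(int numports, int *lenp)
    {
        __u16 data = 0;
        int port;

        for (port = 0; port < numports; port++)
            if (port_changed(port))      /* cs_chg || pe_chg above */
                data |= 1 << (port + 1); /* bit 0 is the hub itself */

        *lenp = numports / 8 + 1;        /* numports+1 bits, in bytes */
        return data;
    }
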
 35.1163 -/* Virtual Root Hub INTs are polled by this timer every "interval" ms */
 35.1164 -static int rh_init_int_timer(struct urb *urb);
 35.1165 -
 35.1166 -static void rh_int_timer_do(unsigned long ptr)
 35.1167 -{
 35.1168 -	struct urb *urb = (struct urb *)ptr;
 35.1169 -	struct list_head list, *tmp, *head;
 35.1170 -	unsigned long flags;
 35.1171 -	int i;
 35.1172 -
 35.1173 -	for ( i = 0; i < xhci->rh.numports; i++)
 35.1174 -                xhci_queue_probe(i);
 35.1175 -
 35.1176 -	if (xhci->rh.send)
 35.1177 -		rh_send_irq(urb);
 35.1178 -
 35.1179 -	INIT_LIST_HEAD(&list);
 35.1180 -
 35.1181 -	spin_lock_irqsave(&xhci->urb_list_lock, flags);
 35.1182 -	head = &xhci->urb_list;
 35.1183 -	tmp = head->next;
 35.1184 -	while (tmp != head) {
 35.1185 -		struct urb *u = list_entry(tmp, struct urb, urb_list);
 35.1186 -		struct urb_priv *up = (struct urb_priv *)u->hcpriv;
 35.1187 -
 35.1188 -		tmp = tmp->next;
 35.1189 -
 35.1190 -		spin_lock(&u->lock);
 35.1191 -
 35.1192 -		/* Check if the URB timed out */
 35.1193 -		if (u->timeout && time_after_eq(jiffies,
 35.1194 -                                                up->inserttime + u->timeout)) {
 35.1195 -			list_del(&u->urb_list);
 35.1196 -			list_add_tail(&u->urb_list, &list);
 35.1197 -		}
 35.1198 -
 35.1199 -		spin_unlock(&u->lock);
 35.1200 -	}
 35.1201 -	spin_unlock_irqrestore(&xhci->urb_list_lock, flags);
 35.1202 -
 35.1203 -	head = &list;
 35.1204 -	tmp = head->next;
 35.1205 -	while (tmp != head) {
 35.1206 -		struct urb *u = list_entry(tmp, struct urb, urb_list);
 35.1207 -
 35.1208 -		tmp = tmp->next;
 35.1209 -
 35.1210 -		u->transfer_flags |= USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED;
 35.1211 -		xhci_unlink_urb(u);
 35.1212 -	}
 35.1213 -
 35.1214 -	rh_init_int_timer(urb);
 35.1215 -}
 35.1216 -
 35.1217 -/* Root Hub INTs are polled by this timer */
 35.1218 -static int rh_init_int_timer(struct urb *urb)
 35.1219 -{
 35.1220 -	xhci->rh.interval = urb->interval;
 35.1221 -	init_timer(&xhci->rh.rh_int_timer);
 35.1222 -	xhci->rh.rh_int_timer.function = rh_int_timer_do;
 35.1223 -	xhci->rh.rh_int_timer.data = (unsigned long)urb;
 35.1224 -	xhci->rh.rh_int_timer.expires = jiffies
 35.1225 -                + (HZ * (urb->interval < 30 ? 30 : urb->interval)) / 1000;
 35.1226 -	add_timer(&xhci->rh.rh_int_timer);
 35.1227 -
 35.1228 -	return 0;
 35.1229 -}
 35.1230 -
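
Root-hub polling uses the classic 2.6-era self-rearming timer: the handler does its work and then re-registers itself for the next tick, clamped above to at least 30ms. The same shape in miniature (poll_ports() is a hypothetical work function):

    static struct timer_list poll_timer;

    static void poll_tick(unsigned long data)
    {
        poll_ports();                             /* hypothetical work */
        /* Re-arm for the next tick: 30ms expressed in jiffies. */
        mod_timer(&poll_timer, jiffies + (HZ * 30) / 1000);
    }

    static void poll_start(void)
    {
        init_timer(&poll_timer);
        poll_timer.function = poll_tick;
        poll_timer.expires  = jiffies + (HZ * 30) / 1000;
        add_timer(&poll_timer);
    }
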
 35.1231 -#define OK(x)			len = (x); break
 35.1232 -
 35.1233 -/* Root Hub Control Pipe */
 35.1234 -static int rh_submit_urb(struct urb *urb)
 35.1235 -{
 35.1236 -	unsigned int pipe = urb->pipe;
 35.1237 -	struct usb_ctrlrequest *cmd =
 35.1238 -                (struct usb_ctrlrequest *)urb->setup_packet;
 35.1239 -	void *data = urb->transfer_buffer;
 35.1240 -	int leni = urb->transfer_buffer_length;
 35.1241 -	int len = 0;
 35.1242 -	xhci_port_t *status;
 35.1243 -	int stat = 0;
 35.1244 -	int i;
 35.1245 -	int retstatus;
 35.1246 -        unsigned long flags;
 35.1247 -        
 35.1248 -	__u16 cstatus;
 35.1249 -	__u16 bmRType_bReq;
 35.1250 -	__u16 wValue;
 35.1251 -	__u16 wIndex;
 35.1252 -	__u16 wLength;
 35.1253 -
 35.1254 -	if (usb_pipetype(pipe) == PIPE_INTERRUPT) {
 35.1255 -		xhci->rh.urb = urb;
 35.1256 -		xhci->rh.send = 1;
 35.1257 -		xhci->rh.interval = urb->interval;
 35.1258 -		rh_init_int_timer(urb);
 35.1259 -
 35.1260 -		return -EINPROGRESS;
 35.1261 -	}
 35.1262 -
 35.1263 -	bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
 35.1264 -	wValue = le16_to_cpu(cmd->wValue);
 35.1265 -	wIndex = le16_to_cpu(cmd->wIndex);
 35.1266 -	wLength = le16_to_cpu(cmd->wLength);
 35.1267 -
 35.1268 -	for (i = 0; i < 8; i++)
 35.1269 -		xhci->rh.c_p_r[i] = 0;
 35.1270 -
 35.1271 -        status = &xhci->rh.ports[wIndex - 1];
 35.1272 -
 35.1273 -        spin_lock_irqsave(&xhci->rh.port_state_lock, flags);
 35.1274 -
 35.1275 -	switch (bmRType_bReq) {
 35.1276 -		/* Request Destination:
 35.1277 -		   without flags: Device,
 35.1278 -		   RH_INTERFACE: interface,
 35.1279 -		   RH_ENDPOINT: endpoint,
 35.1280 -		   RH_CLASS means HUB here,
 35.1281 -		   RH_OTHER | RH_CLASS almost always means HUB_PORT here
 35.1282 -		*/
 35.1283 -
 35.1284 -	case RH_GET_STATUS:
 35.1285 -		*(__u16 *)data = cpu_to_le16(1);
 35.1286 -		OK(2);
 35.1287 -	case RH_GET_STATUS | RH_INTERFACE:
 35.1288 -		*(__u16 *)data = cpu_to_le16(0);
 35.1289 -		OK(2);
 35.1290 -	case RH_GET_STATUS | RH_ENDPOINT:
 35.1291 -		*(__u16 *)data = cpu_to_le16(0);
 35.1292 -		OK(2);
 35.1293 -	case RH_GET_STATUS | RH_CLASS:
 35.1294 -		*(__u32 *)data = cpu_to_le32(0);
 35.1295 -		OK(4);		/* hub power */
 35.1296 -	case RH_GET_STATUS | RH_OTHER | RH_CLASS:
 35.1297 -		cstatus = (status->cs_chg) |
 35.1298 -			(status->pe_chg << 1) |
 35.1299 -			(xhci->rh.c_p_r[wIndex - 1] << 4);
 35.1300 -		retstatus = (status->cs) |
 35.1301 -			(status->pe << 1) |
 35.1302 -			(status->susp << 2) |
 35.1303 -			(1 << 8) |      /* power on */
 35.1304 -			(status->lsda << 9);
 35.1305 -		*(__u16 *)data = cpu_to_le16(retstatus);
 35.1306 -		*(__u16 *)(data + 2) = cpu_to_le16(cstatus);
 35.1307 -		OK(4);
 35.1308 -	case RH_CLEAR_FEATURE | RH_ENDPOINT:
 35.1309 -		switch (wValue) {
 35.1310 -		case RH_ENDPOINT_STALL:
 35.1311 -			OK(0);
 35.1312 -		}
 35.1313 -		break;
 35.1314 -	case RH_CLEAR_FEATURE | RH_CLASS:
 35.1315 -		switch (wValue) {
 35.1316 -		case RH_C_HUB_OVER_CURRENT:
 35.1317 -			OK(0);	/* hub power over current */
 35.1318 -		}
 35.1319 -		break;
 35.1320 -	case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
 35.1321 -		switch (wValue) {
 35.1322 -		case RH_PORT_ENABLE:
 35.1323 -                        status->pe     = 0;
 35.1324 -			OK(0);
 35.1325 -		case RH_PORT_SUSPEND:
 35.1326 -                        status->susp   = 0;
 35.1327 -			OK(0);
 35.1328 -		case RH_PORT_POWER:
 35.1329 -			OK(0);	/* port power */
 35.1330 -		case RH_C_PORT_CONNECTION:
 35.1331 -                        status->cs_chg = 0;
 35.1332 -			OK(0);
 35.1333 -		case RH_C_PORT_ENABLE:
 35.1334 -                        status->pe_chg = 0;
 35.1335 -			OK(0);
 35.1336 -		case RH_C_PORT_SUSPEND:
 35.1337 -			/*** WR_RH_PORTSTAT(RH_PS_PSSC); */
 35.1338 -			OK(0);
 35.1339 -		case RH_C_PORT_OVER_CURRENT:
 35.1340 -			OK(0);	/* port power over current */
 35.1341 -		case RH_C_PORT_RESET:
 35.1342 -			xhci->rh.c_p_r[wIndex - 1] = 0;
 35.1343 -			OK(0);
 35.1344 -		}
 35.1345 -		break;
 35.1346 -	case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
 35.1347 -		switch (wValue) {
 35.1348 -		case RH_PORT_SUSPEND:
 35.1349 -                        status->susp = 1;	
 35.1350 -			OK(0);
 35.1351 -		case RH_PORT_RESET:
 35.1352 -                {
 35.1353 -                        int ret;
 35.1354 -                        xhci->rh.c_p_r[wIndex - 1] = 1;
 35.1355 -                        status->pr = 0;
 35.1356 -                        status->pe = 1;
 35.1357 -                        ret = xhci_port_reset(wIndex - 1);
 35.1358 -                        /* XXX MAW: should probably cancel queued transfers during reset. */
 35.1359 -                        if ( ret == 0 ) { OK(0); }
 35.1360 -                        else { spin_unlock_irqrestore(&xhci->rh.port_state_lock, flags); return ret; }
 35.1361 -                }
 35.1362 -                break;
 35.1363 -		case RH_PORT_POWER:
 35.1364 -			OK(0); /* port power ** */
 35.1365 -		case RH_PORT_ENABLE:
 35.1366 -                        status->pe = 1;
 35.1367 -			OK(0);
 35.1368 -		}
 35.1369 -		break;
 35.1370 -	case RH_SET_ADDRESS:
 35.1371 -		xhci->rh.devnum = wValue;
 35.1372 -		OK(0);
 35.1373 -	case RH_GET_DESCRIPTOR:
 35.1374 -		switch ((wValue & 0xff00) >> 8) {
 35.1375 -		case 0x01:	/* device descriptor */
 35.1376 -			len = min_t(unsigned int, leni,
 35.1377 -				  min_t(unsigned int,
 35.1378 -				      sizeof(root_hub_dev_des), wLength));
 35.1379 -			memcpy(data, root_hub_dev_des, len);
 35.1380 -			OK(len);
 35.1381 -		case 0x02:	/* configuration descriptor */
 35.1382 -			len = min_t(unsigned int, leni,
 35.1383 -				  min_t(unsigned int,
 35.1384 -				      sizeof(root_hub_config_des), wLength));
 35.1385 -			memcpy (data, root_hub_config_des, len);
 35.1386 -			OK(len);
 35.1387 -		case 0x03:	/* string descriptors */
 35.1388 -			len = usb_root_hub_string (wValue & 0xff,
 35.1389 -				0, "XHCI-alt",
 35.1390 -				data, wLength);
 35.1391 -			if (len > 0) {
 35.1392 -				OK(min_t(int, leni, len));
 35.1393 -			} else 
 35.1394 -				stat = -EPIPE;
 35.1395 -		}
 35.1396 -		break;
 35.1397 -	case RH_GET_DESCRIPTOR | RH_CLASS:
 35.1398 -		root_hub_hub_des[2] = xhci->rh.numports;
 35.1399 -		len = min_t(unsigned int, leni,
 35.1400 -			  min_t(unsigned int, sizeof(root_hub_hub_des), wLength));
 35.1401 -		memcpy(data, root_hub_hub_des, len);
 35.1402 -		OK(len);
 35.1403 -	case RH_GET_CONFIGURATION:
 35.1404 -		*(__u8 *)data = 0x01;
 35.1405 -		OK(1);
 35.1406 -	case RH_SET_CONFIGURATION:
 35.1407 -		OK(0);
 35.1408 -	case RH_GET_INTERFACE | RH_INTERFACE:
 35.1409 -		*(__u8 *)data = 0x00;
 35.1410 -		OK(1);
 35.1411 -	case RH_SET_INTERFACE | RH_INTERFACE:
 35.1412 -		OK(0);
 35.1413 -	default:
 35.1414 -		stat = -EPIPE;
 35.1415 -	}
 35.1416 -
 35.1417 -        spin_unlock_irqrestore(&xhci->rh.port_state_lock, flags);
 35.1418 -
 35.1419 -	urb->actual_length = len;
 35.1420 -
 35.1421 -	return stat;
 35.1422 -}
 35.1423 -
 35.1424 -/*
 35.1425 - * MUST be called with urb->lock acquired
 35.1426 - */
 35.1427 -static int rh_unlink_urb(struct urb *urb)
 35.1428 -{
 35.1429 -	if (xhci->rh.urb == urb) {
 35.1430 -		urb->status = -ENOENT;
 35.1431 -		xhci->rh.send = 0;
 35.1432 -		xhci->rh.urb = NULL;
 35.1433 -		del_timer(&xhci->rh.rh_int_timer);
 35.1434 -	}
 35.1435 -	return 0;
 35.1436 -}
 35.1437 -
 35.1438 -/******************************************************************************
 35.1439 - * CONTROL PLANE FUNCTIONALITY
 35.1440 - */
 35.1441 -
 35.1442 -/**
 35.1443 - * alloc_xhci - initialise a new virtual root hub for a new USB device channel
 35.1444 - */
 35.1445 -static int alloc_xhci(void)
 35.1446 -{
 35.1447 -	int retval;
 35.1448 -	struct usb_bus *bus;
 35.1449 -
 35.1450 -	retval = -EBUSY;
 35.1451 -
 35.1452 -	xhci = kmalloc(sizeof(*xhci), GFP_KERNEL);
 35.1453 -	if (!xhci) {
 35.1454 -		err("couldn't allocate xhci structure");
 35.1455 -		retval = -ENOMEM;
 35.1456 -		goto err_alloc_xhci;
 35.1457 -	}
 35.1458 -
 35.1459 -	xhci->state = USBIF_STATE_CLOSED;
 35.1460 -
 35.1461 -	spin_lock_init(&xhci->urb_list_lock);
 35.1462 -	INIT_LIST_HEAD(&xhci->urb_list);
 35.1463 -
 35.1464 -	spin_lock_init(&xhci->complete_list_lock);
 35.1465 -	INIT_LIST_HEAD(&xhci->complete_list);
 35.1466 -
 35.1467 -	spin_lock_init(&xhci->frame_list_lock);
 35.1468 -
 35.1469 -	bus = usb_alloc_bus(&xhci_device_operations);
 35.1470 -
 35.1471 -	if (!bus) {
 35.1472 -		err("unable to allocate bus");
 35.1473 -		goto err_alloc_bus;
 35.1474 -	}
 35.1475 -
 35.1476 -	xhci->bus = bus;
 35.1477 -	bus->bus_name = "XHCI";
 35.1478 -	bus->hcpriv = xhci;
 35.1479 -
 35.1480 -	usb_register_bus(xhci->bus);
 35.1481 -
 35.1482 -	/* Initialize the root hub */
 35.1483 -
 35.1484 -	xhci->rh.numports = 0;
 35.1485 -
 35.1486 -	xhci->bus->root_hub = xhci->rh.dev = usb_alloc_dev(NULL, xhci->bus);
 35.1487 -	if (!xhci->rh.dev) {
 35.1488 -		err("unable to allocate root hub");
 35.1489 -		goto err_alloc_root_hub;
 35.1490 -	}
 35.1491 -
 35.1492 -	xhci->state = 0;
 35.1493 -
 35.1494 -	return 0;
 35.1495 -
 35.1496 -/*
 35.1497 - * error exits:
 35.1498 - */
 35.1499 -err_alloc_root_hub:
 35.1500 -        usb_deregister_bus(xhci->bus);
 35.1501 -	usb_free_bus(xhci->bus);
 35.1502 -	xhci->bus = NULL;
 35.1503 -
 35.1504 -err_alloc_bus:
 35.1505 -	kfree(xhci);
 35.1506 -
 35.1507 -err_alloc_xhci:
 35.1508 -	return retval;
 35.1509 -}
 35.1510 -
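
alloc_xhci() follows the kernel's reverse-order goto-unwind convention for error exits: each successful allocation gains a label below the success return, and a failure jumps to the label that frees everything acquired so far, in reverse order. Stripped to the skeleton (alloc_a/alloc_b/free_a are placeholders):

    static int setup(void)
    {
        struct a *a;
        struct b *b;
        int retval = -ENOMEM;

        a = alloc_a();
        if (!a)
            goto err_a;          /* nothing to undo yet */

        b = alloc_b();
        if (!b)
            goto err_b;          /* must undo 'a' first */

        return 0;

    err_b:
        free_a(a);               /* unwind in reverse order */
    err_a:
        return retval;
    }
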
 35.1511 -/**
 35.1512 - * usbif_status_change - deal with an incoming USB_INTERFACE_STATUS_ message
 35.1513 - */
 35.1514 -static void usbif_status_change(usbif_fe_interface_status_changed_t *status)
 35.1515 -{
 35.1516 -    ctrl_msg_t                   cmsg;
 35.1517 -    usbif_fe_interface_connect_t up;
 35.1518 -    long rc;
 35.1519 -    usbif_sring_t *sring;
 35.1520 -
 35.1521 -    switch ( status->status )
 35.1522 -    {
 35.1523 -    case USBIF_INTERFACE_STATUS_DESTROYED:
 35.1524 -        printk(KERN_WARNING "Unexpected usbif-DESTROYED message in state %d\n",
 35.1525 -               xhci->state);
 35.1526 -        break;
 35.1527 -
 35.1528 -    case USBIF_INTERFACE_STATUS_DISCONNECTED:
 35.1529 -        if ( xhci->state != USBIF_STATE_CLOSED )
 35.1530 -        {
 35.1531 -            printk(KERN_WARNING "Unexpected usbif-DISCONNECTED message"
 35.1532 -                   " in state %d\n", xhci->state);
 35.1533 -            break;
 35.1534 -            /* Not bothering to do recovery here for now.  Keep things
 35.1535 -             * simple: the break above skips the clean-up code below. */
 35.1536 -
 35.1537 -            spin_lock_irq(&xhci->ring_lock);
 35.1538 -            
 35.1539 -            /* Clean up resources. */
 35.1540 -            free_page((unsigned long)xhci->usb_ring.sring);
 35.1541 -            unbind_evtchn_from_irqhandler(xhci->evtchn, xhci);
 35.1542 -
 35.1543 -            /* Plug the ring. */
 35.1544 -            xhci->recovery = 1;
 35.1545 -            wmb();
 35.1546 -            
 35.1547 -            spin_unlock_irq(&xhci->ring_lock);
 35.1548 -        }
 35.1549 -
 35.1550 -        /* Move from CLOSED to DISCONNECTED state. */
 35.1551 -        sring = (usbif_sring_t *)__get_free_page(GFP_KERNEL);
 35.1552 -        SHARED_RING_INIT(sring);
 35.1553 -        FRONT_RING_INIT(&xhci->usb_ring, sring, PAGE_SIZE);
 35.1554 -        xhci->state  = USBIF_STATE_DISCONNECTED;
 35.1555 -
 35.1556 -        /* Construct an interface-CONNECT message for the domain controller. */
 35.1557 -        cmsg.type      = CMSG_USBIF_FE;
 35.1558 -        cmsg.subtype   = CMSG_USBIF_FE_INTERFACE_CONNECT;
 35.1559 -        cmsg.length    = sizeof(usbif_fe_interface_connect_t);
 35.1560 -        up.shmem_frame = virt_to_mfn(sring);
 35.1561 -        memcpy(cmsg.msg, &up, sizeof(up));
 35.1562 -        
 35.1563 -        /* Tell the controller to bring up the interface. */
 35.1564 -        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
 35.1565 -        break;
 35.1566 -
 35.1567 -    case USBIF_INTERFACE_STATUS_CONNECTED:
 35.1568 -        if ( xhci->state == USBIF_STATE_CLOSED )
 35.1569 -        {
 35.1570 -            printk(KERN_WARNING "Unexpected usbif-CONNECTED message"
 35.1571 -                   " in state %d\n", xhci->state);
 35.1572 -            break;
 35.1573 -        }
 35.1574 -
 35.1575 -        xhci->evtchn = status->evtchn;
 35.1576 -	xhci->bandwidth = status->bandwidth;
 35.1577 -	xhci->rh.numports = status->num_ports;
 35.1578 -
 35.1579 -        xhci->rh.ports = kmalloc (sizeof(xhci_port_t) * xhci->rh.numports, GFP_KERNEL);
 35.1580 -	
 35.1581 -	if ( xhci->rh.ports == NULL )
 35.1582 -            goto alloc_ports_nomem;
 35.1583 -	
 35.1584 -        memset(xhci->rh.ports, 0, sizeof(xhci_port_t) * xhci->rh.numports);
 35.1585 -
 35.1586 -	usb_connect(xhci->rh.dev);
 35.1587 -
 35.1588 -	if (usb_new_device(xhci->rh.dev) != 0) {
 35.1589 -		err("unable to start root hub");
 35.1590 -	}
 35.1591 -
 35.1592 -	/* Allocate the appropriate USB bandwidth here...  We need to
 35.1593 -         * know the total bandwidth thought to be available so that we
 35.1594 -         * can calculate the reservation correctly. */
 35.1595 - 	usb_claim_bandwidth(xhci->rh.dev, xhci->rh.urb,
 35.1596 - 			    1000 - xhci->bandwidth, 0);
 35.1597 -
 35.1598 -        if ( (rc = bind_evtchn_to_irqhandler(xhci->evtchn, xhci_interrupt, 
 35.1599 -                               SA_SAMPLE_RANDOM, "usbif", xhci)) )
 35.1600 -                printk(KERN_ALERT "usbfront request_irq failed (%ld)\n", rc);
 35.1601 -
 35.1602 -	DPRINTK(KERN_INFO __FILE__
 35.1603 -                ": USB XHCI: SHM at %p (0x%lx), EVTCHN %d\n",
 35.1604 -                xhci->usb_ring.sring, virt_to_mfn(xhci->usb_ring.sring),
 35.1605 -                xhci->evtchn);
 35.1606 -
 35.1607 -        xhci->state = USBIF_STATE_CONNECTED;
 35.1608 -
 35.1609 -        break;
 35.1610 -
 35.1611 -    default:
 35.1612 -        printk(KERN_WARNING "Status change to unknown value %d\n", 
 35.1613 -               status->status);
 35.1614 -        break;
 35.1615 -    }
 35.1616 -
 35.1617 -    return;
 35.1618 -
 35.1619 - alloc_ports_nomem:
 35.1620 -    printk(KERN_WARNING "Failed to allocate port memory; XHCI failed to connect.\n");
 35.1621 -    return;
 35.1622 -}
 35.1623 -
 35.1624 -/**
 35.1625 - * usbif_ctrlif_rx - demux control messages by subtype
 35.1626 - */
 35.1627 -static void usbif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
 35.1628 -{
 35.1629 -    switch ( msg->subtype )
 35.1630 -    {
 35.1631 -    case CMSG_USBIF_FE_INTERFACE_STATUS_CHANGED:
 35.1632 -        usbif_status_change((usbif_fe_interface_status_changed_t *)
 35.1633 -                            &msg->msg[0]);
 35.1634 -        break;
 35.1635 -
 35.1636 -        /* New interface...? */
 35.1637 -    default:
 35.1638 -        msg->length = 0;
 35.1639 -        break;
 35.1640 -    }
 35.1641 -
 35.1642 -    ctrl_if_send_response(msg);
 35.1643 -}
 35.1644 -
 35.1645 -static void send_driver_up(void)
 35.1646 -{
 35.1647 -        control_msg_t cmsg;
 35.1648 -        usbif_fe_driver_status_changed_t st;
 35.1649 -
 35.1650 -        /* Send a driver-UP notification to the domain controller. */
 35.1651 -        cmsg.type      = CMSG_USBIF_FE;
 35.1652 -        cmsg.subtype   = CMSG_USBIF_FE_DRIVER_STATUS_CHANGED;
 35.1653 -        cmsg.length    = sizeof(usbif_fe_driver_status_changed_t);
 35.1654 -        st.status      = USBIF_DRIVER_STATUS_UP;
 35.1655 -        memcpy(cmsg.msg, &st, sizeof(st));
 35.1656 -        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
 35.1657 -}
 35.1658 -
 35.1659 -void usbif_resume(void)
 35.1660 -{
 35.1661 -        int i;
 35.1662 -        
 35.1663 -        /* Fake disconnection on all virtual USB ports (suspending / migrating
 35.1664 -         * will destroy hard state associated with the USB devices anyhow). */
 35.1665 -        /* No need to lock here. */
 35.1666 -        for ( i = 0; i < xhci->rh.numports; i++ )
 35.1667 -        {
 35.1668 -                xhci->rh.ports[i].cs = 0;
 35.1669 -                xhci->rh.ports[i].cs_chg = 1;
 35.1670 -		xhci->rh.ports[i].pe = 0;
 35.1671 -        }
 35.1672 -        
 35.1673 -        send_driver_up();
 35.1674 -}
 35.1675 -
 35.1676 -static int __init xhci_hcd_init(void)
 35.1677 -{
 35.1678 -	int retval = -ENOMEM, i;
 35.1679 -
 35.1680 -	if ( (xen_start_info->flags & SIF_INITDOMAIN) ||
 35.1681 -	     (xen_start_info->flags & SIF_USB_BE_DOMAIN) )
 35.1682 -                return 0;
 35.1683 -
 35.1684 -	info(DRIVER_DESC " " DRIVER_VERSION);
 35.1685 -
 35.1686 -	if (debug) {
 35.1687 -		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
 35.1688 -		if (!errbuf)
 35.1689 -			goto errbuf_failed;
 35.1690 -	}
 35.1691 -
 35.1692 -	xhci_up_cachep = kmem_cache_create("xhci_urb_priv",
 35.1693 -		sizeof(struct urb_priv), 0, 0, NULL, NULL);
 35.1694 -	if (!xhci_up_cachep)
 35.1695 -		goto up_failed;
 35.1696 -
 35.1697 -        /* Let the domain controller know we're here.  For now we wait until
 35.1698 -         * the connection is established, as the block and net drivers do.  This
 35.1699 -         * is only strictly necessary if we're going to boot off a USB device. */
 35.1700 -        printk(KERN_INFO "Initialising Xen virtual USB hub\n");
 35.1701 -    
 35.1702 -        (void)ctrl_if_register_receiver(CMSG_USBIF_FE, usbif_ctrlif_rx,
 35.1703 -                                        CALLBACK_IN_BLOCKING_CONTEXT);
 35.1704 -        
 35.1705 -	alloc_xhci();
 35.1706 -
 35.1707 -        send_driver_up();
 35.1708 -
 35.1709 -        /*
 35.1710 -         * We should read 'nr_interfaces' from the response message and wait
 35.1711 -         * for notifications before proceeding. For now we assume that we
 35.1712 -         * will be notified of exactly one interface.
 35.1713 -         */
 35.1714 -        for ( i=0; (xhci->state != USBIF_STATE_CONNECTED) && (i < 10*HZ); i++ )
 35.1715 -        {
 35.1716 -            set_current_state(TASK_INTERRUPTIBLE);
 35.1717 -            schedule_timeout(1);
 35.1718 -        }
 35.1719 -        
 35.1720 -        if (xhci->state != USBIF_STATE_CONNECTED)
 35.1721 -            printk(KERN_WARNING "Timed out connecting USB frontend driver!\n");
 35.1722 -	
 35.1723 -	return 0;
 35.1724 -
 35.1725 -up_failed:
 35.1726 -	if (errbuf)
 35.1727 -		kfree(errbuf);
 35.1728 -
 35.1729 -errbuf_failed:
 35.1730 -	return retval;
 35.1731 -}
 35.1732 -
 35.1733 -module_init(xhci_hcd_init);
 35.1734 -
 35.1735 -MODULE_AUTHOR(DRIVER_AUTHOR);
 35.1736 -MODULE_DESCRIPTION(DRIVER_DESC);
 35.1737 -MODULE_LICENSE("GPL");
 35.1738 -
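Taken together, xhci_hcd_init() and usbif_status_change() above implement a
simple connection handshake with the domain controller.  A rough sketch of the
message flow, inferred only from the message types used in this file (the
ordering is an inference, not taken from a separate protocol spec):

    /*
     *  frontend                              domain controller
     *  --------                              -----------------
     *  DRIVER_STATUS_CHANGED(UP)      --->
     *                                 <---   INTERFACE_STATUS(DISCONNECTED)
     *  INTERFACE_CONNECT(shmem_frame) --->
     *                                 <---   INTERFACE_STATUS(CONNECTED,
     *                                            evtchn, bandwidth, num_ports)
     */

xhci_hcd_init() then waits (via schedule_timeout()) for up to ten seconds for
the CONNECTED transition before warning that the frontend failed to connect.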
    36.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbfront/xhci.h	Thu Sep 22 16:05:44 2005 +0100
    36.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    36.3 @@ -1,182 +0,0 @@
    36.4 -/******************************************************************************
    36.5 - * xhci.h
    36.6 - *
    36.7 - * Private definitions for the Xen Virtual USB Controller.  Based on
    36.8 - * drivers/usb/host/uhci.h from Linux.  Copyright for the imported content is
    36.9 - * retained by the original authors.
   36.10 - *
   36.11 - * Modifications are:
   36.12 - * Copyright (C) 2004 Intel Research Cambridge
   36.13 - * Copyright (C) 2004, 2005 Mark Williamson
   36.14 - */
   36.15 -
   36.16 -#ifndef __LINUX_XHCI_H
   36.17 -#define __LINUX_XHCI_H
   36.18 -
   36.19 -#include <linux/list.h>
   36.20 -#include <linux/usb.h>
   36.21 -#include <asm-xen/xen-public/io/usbif.h>
   36.22 -#include <linux/spinlock.h>
   36.23 -
   36.24 -/* xhci_port_t - current known state of a virtual hub port */
   36.25 -typedef struct {
   36.26 -        unsigned int cs     :1; /* Connection status.         */
   36.27 -        unsigned int cs_chg :1; /* Connection status change.  */
   36.28 -        unsigned int pe     :1; /* Port enable.               */
   36.29 -        unsigned int pe_chg :1; /* Port enable change.        */
   36.30 -        unsigned int susp   :1; /* Suspended.                 */
   36.31 -        unsigned int lsda   :1; /* Low speed device attached. */
   36.32 -        unsigned int pr     :1; /* Port reset.                */
   36.33 -} xhci_port_t;
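These bit-fields shadow the standard hub-class port-status bits; the bit
positions appear as the RH_PORT_* constants later in this header.  A minimal
sketch of packing one port's state into a wPortStatus-style word (the helper
name is illustrative, not part of this header):

    static inline __u16 xhci_port_status(const xhci_port_t *p)
    {
            /* Bit positions follow the RH_PORT_* defines below. */
            return (p->cs   << RH_PORT_CONNECTION) |
                   (p->pe   << RH_PORT_ENABLE)     |
                   (p->susp << RH_PORT_SUSPEND)    |
                   (p->pr   << RH_PORT_RESET)      |
                   (p->lsda << RH_PORT_LOW_SPEED);
    }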
   36.34 -
   36.35 -/* struct virt_root_hub - state related to the virtual root hub */
   36.36 -struct virt_root_hub {
   36.37 -	struct usb_device *dev;
   36.38 -	int devnum;		/* Address of Root Hub endpoint */
   36.39 -	struct urb *urb;
   36.40 -	void *int_addr;
   36.41 -	int send;
   36.42 -	int interval;
   36.43 -	int numports;
   36.44 -	int c_p_r[8];
   36.45 -	struct timer_list rh_int_timer;
   36.46 -        spinlock_t port_state_lock;
   36.47 -        xhci_port_t *ports;
   36.48 -};
   36.49 -
   36.50 -/* struct xhci - contains the state associated with a single USB interface */
   36.51 -struct xhci {
   36.52 -
   36.53 -#ifdef CONFIG_PROC_FS
   36.54 -	/* procfs */
   36.55 -	int num;
   36.56 -	struct proc_dir_entry *proc_entry;
   36.57 -#endif
   36.58 -
   36.59 -        int evtchn;                        /* Interdom channel to backend */
   36.60 -        enum { 
   36.61 -                USBIF_STATE_CONNECTED    = 2,
   36.62 -                USBIF_STATE_DISCONNECTED = 1,
   36.63 -                USBIF_STATE_CLOSED       = 0
   36.64 -        } state; /* State of this USB interface */
   36.65 -        unsigned long recovery; /* boolean recovery in progress flag */
   36.66 -        
   36.67 -        unsigned long bandwidth;
   36.68 -
   36.69 -	struct usb_bus *bus;
   36.70 -
   36.71 -	/* Main list of URBs currently controlled by this HC */
   36.72 -	spinlock_t urb_list_lock;
   36.73 -	struct list_head urb_list;		/* P: xhci->urb_list_lock */
   36.74 -
   36.75 -	/* List of URBs awaiting completion callback */
   36.76 -	spinlock_t complete_list_lock;
   36.77 -	struct list_head complete_list;		/* P: xhci->complete_list_lock */
   36.78 -
   36.79 -	struct virt_root_hub rh;	/* private data of the virtual root hub */
   36.80 -
   36.81 -        spinlock_t ring_lock;
   36.82 -        usbif_front_ring_t usb_ring;
   36.83 -
   36.84 -        int awaiting_reset;
   36.85 -};
   36.86 -
   36.87 -/* per-URB private data structure for the host controller */
   36.88 -struct urb_priv {
   36.89 -	struct urb *urb;
   36.90 -        usbif_iso_t *schedule;
   36.91 -	struct usb_device *dev;
   36.92 -
   36.93 -        int in_progress : 1;	        /* QH was queued (not linked in) */
   36.94 -	int short_control_packet : 1;	/* If we get a short packet during */
   36.95 -					/*  a control transfer, retrigger */
   36.96 -					/*  the status phase */
   36.97 -
   36.98 -	int status;			/* Final status */
   36.99 -
  36.100 -	unsigned long inserttime;	/* In jiffies */
  36.101 -
  36.102 -	struct list_head complete_list;	/* P: xhci->complete_list_lock */
  36.103 -};
  36.104 -
  36.105 -/*
  36.106 - * Locking in xhci.c
  36.107 - *
  36.108 - * spinlocks are used extensively to protect the many lists and data
  36.109 - * structures we have. It's not that pretty, but it's necessary. We
  36.110 - * need to be done with all of the locks (except complete_list_lock) when
  36.111 - * we call urb->complete. I've tried to make it simple enough so I don't
  36.112 - * have to spend hours racking my brain trying to figure out if the
  36.113 - * locking is safe.
  36.114 - *
  36.115 - * Here's the safe locking order to prevent deadlocks:
  36.116 - *
  36.117 - * #1 xhci->urb_list_lock
  36.118 - * #2 urb->lock
  36.119 - * #3 xhci->urb_remove_list_lock
  36.120 - * #4 xhci->complete_list_lock
  36.121 - *
  36.122 - * If you're going to grab 2 or more locks at once, ALWAYS grab the lock
  36.123 - * at the lowest level FIRST and NEVER grab locks at the same level at the
  36.124 - * same time.
  36.125 - * 
  36.126 - * So, if you need xhci->urb_list_lock, grab it before you grab urb->lock
  36.127 - */
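A minimal sketch of the documented nesting, assuming the caller already has
both a struct xhci * and a struct urb * in hand (variable names illustrative):

    unsigned long flags;

    spin_lock_irqsave(&xhci->urb_list_lock, flags); /* #1 first       */
    spin_lock(&urb->lock);                          /* then #2 inside */
    /* ... inspect or update the URB while both locks are held ... */
    spin_unlock(&urb->lock);
    spin_unlock_irqrestore(&xhci->urb_list_lock, flags);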
  36.128 -
  36.129 -/* -------------------------------------------------------------------------
  36.130 -   Virtual Root HUB
  36.131 -   ------------------------------------------------------------------------- */
  36.132 -/* destination of request */
  36.133 -#define RH_DEVICE		0x00
  36.134 -#define RH_INTERFACE		0x01
  36.135 -#define RH_ENDPOINT		0x02
  36.136 -#define RH_OTHER		0x03
  36.137 -
  36.138 -#define RH_CLASS		0x20
  36.139 -#define RH_VENDOR		0x40
  36.140 -
  36.141 -/* Requests: bRequest << 8 | bmRequestType */
  36.142 -#define RH_GET_STATUS		0x0080
  36.143 -#define RH_CLEAR_FEATURE	0x0100
  36.144 -#define RH_SET_FEATURE		0x0300
  36.145 -#define RH_SET_ADDRESS		0x0500
  36.146 -#define RH_GET_DESCRIPTOR	0x0680
  36.147 -#define RH_SET_DESCRIPTOR	0x0700
  36.148 -#define RH_GET_CONFIGURATION	0x0880
  36.149 -#define RH_SET_CONFIGURATION	0x0900
  36.150 -#define RH_GET_STATE		0x0280
  36.151 -#define RH_GET_INTERFACE	0x0A80
  36.152 -#define RH_SET_INTERFACE	0x0B00
  36.153 -#define RH_SYNC_FRAME		0x0C80
  36.154 -/* Our Vendor Specific Request */
  36.155 -#define RH_SET_EP		0x2000
  36.156 -
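The root-hub control handler in xhci.c switches on the combined value
directly; splitting one back into the two USB setup fields looks like this
(the values shown are worked out from the defines above):

    __u16 bmRType_bReq  = RH_GET_DESCRIPTOR | RH_CLASS;   /* 0x06a0               */
    __u8  bmRequestType = bmRType_bReq & 0xff;             /* 0xa0: class request  */
    __u8  bRequest      = bmRType_bReq >> 8;               /* 0x06: GET_DESCRIPTOR */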
  36.157 -/* Hub port features */
  36.158 -#define RH_PORT_CONNECTION	0x00
  36.159 -#define RH_PORT_ENABLE		0x01
  36.160 -#define RH_PORT_SUSPEND		0x02
  36.161 -#define RH_PORT_OVER_CURRENT	0x03
  36.162 -#define RH_PORT_RESET		0x04
  36.163 -#define RH_PORT_POWER		0x08
  36.164 -#define RH_PORT_LOW_SPEED	0x09
  36.165 -#define RH_C_PORT_CONNECTION	0x10
  36.166 -#define RH_C_PORT_ENABLE	0x11
  36.167 -#define RH_C_PORT_SUSPEND	0x12
  36.168 -#define RH_C_PORT_OVER_CURRENT	0x13
  36.169 -#define RH_C_PORT_RESET		0x14
  36.170 -
  36.171 -/* Hub features */
  36.172 -#define RH_C_HUB_LOCAL_POWER	0x00
  36.173 -#define RH_C_HUB_OVER_CURRENT	0x01
  36.174 -#define RH_DEVICE_REMOTE_WAKEUP	0x00
  36.175 -#define RH_ENDPOINT_STALL	0x01
  36.176 -
  36.177 -/* Our Vendor Specific feature */
  36.178 -#define RH_REMOVE_EP		0x00
  36.179 -
  36.180 -#define RH_ACK			0x01
  36.181 -#define RH_REQ_ERR		-1
  36.182 -#define RH_NACK			0x00
  36.183 -
  36.184 -#endif
  36.185 -
    37.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Thu Sep 22 16:05:44 2005 +0100
    37.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Thu Sep 22 16:12:14 2005 +0100
    37.3 @@ -231,3 +231,13 @@ void xb_suspend_comms(void)
    37.4  
    37.5  	unbind_evtchn_from_irqhandler(xen_start_info->store_evtchn, &xb_waitq);
    37.6  }
    37.7 +
    37.8 +/*
    37.9 + * Local variables:
   37.10 + *  c-file-style: "linux"
   37.11 + *  indent-tabs-mode: t
   37.12 + *  c-indent-level: 8
   37.13 + *  c-basic-offset: 8
   37.14 + *  tab-width: 8
   37.15 + * End:
   37.16 + */
    38.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h	Thu Sep 22 16:05:44 2005 +0100
    38.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h	Thu Sep 22 16:12:14 2005 +0100
    38.3 @@ -39,3 +39,13 @@ int xs_input_avail(void);
    38.4  extern wait_queue_head_t xb_waitq;
    38.5  
    38.6  #endif /* _XENBUS_COMMS_H */
    38.7 +
    38.8 +/*
    38.9 + * Local variables:
   38.10 + *  c-file-style: "linux"
   38.11 + *  indent-tabs-mode: t
   38.12 + *  c-indent-level: 8
   38.13 + *  c-basic-offset: 8
   38.14 + *  tab-width: 8
   38.15 + * End:
   38.16 + */
    39.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Thu Sep 22 16:05:44 2005 +0100
    39.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Thu Sep 22 16:12:14 2005 +0100
    39.3 @@ -186,3 +186,13 @@ xenbus_dev_init(void)
    39.4  }
    39.5  
    39.6  __initcall(xenbus_dev_init);
    39.7 +
    39.8 +/*
    39.9 + * Local variables:
   39.10 + *  c-file-style: "linux"
   39.11 + *  indent-tabs-mode: t
   39.12 + *  c-indent-level: 8
   39.13 + *  c-basic-offset: 8
   39.14 + *  tab-width: 8
   39.15 + * End:
   39.16 + */
    40.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Thu Sep 22 16:05:44 2005 +0100
    40.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Thu Sep 22 16:12:14 2005 +0100
    40.3 @@ -687,3 +687,13 @@ static int __init xenbus_probe_init(void
    40.4  }
    40.5  
    40.6  postcore_initcall(xenbus_probe_init);
    40.7 +
    40.8 +/*
    40.9 + * Local variables:
   40.10 + *  c-file-style: "linux"
   40.11 + *  indent-tabs-mode: t
   40.12 + *  c-indent-level: 8
   40.13 + *  c-basic-offset: 8
   40.14 + *  tab-width: 8
   40.15 + * End:
   40.16 + */
    41.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Thu Sep 22 16:05:44 2005 +0100
    41.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Thu Sep 22 16:12:14 2005 +0100
    41.3 @@ -566,3 +566,13 @@ int xs_init(void)
    41.4  		return PTR_ERR(watcher);
    41.5  	return 0;
    41.6  }
    41.7 +
    41.8 +/*
    41.9 + * Local variables:
   41.10 + *  c-file-style: "linux"
   41.11 + *  indent-tabs-mode: t
   41.12 + *  c-indent-level: 8
   41.13 + *  c-basic-offset: 8
   41.14 + *  tab-width: 8
   41.15 + * End:
   41.16 + */
    42.1 --- a/tools/python/xen/xend/XendDomain.py	Thu Sep 22 16:05:44 2005 +0100
    42.2 +++ b/tools/python/xen/xend/XendDomain.py	Thu Sep 22 16:12:14 2005 +0100
    42.3 @@ -305,6 +305,13 @@ class XendDomain:
    42.4  
    42.5          @param vmconfig: vm configuration
    42.6          """
    42.7 +        # We accept our configuration specified as ['config' [...]], which
    42.8 +        # some tools or configuration files may be using.  For save-restore,
    42.9 +        # we use the value of XendDomainInfo.sxpr() directly, which has no
   42.10 +        # such item.
   42.11 +        nested = sxp.child_value(config, 'config')
   42.12 +        if nested:
   42.13 +            config = nested
   42.14          return XendDomainInfo.restore(self.dbmap.getPath(), config)
   42.15  
   42.16      def domain_restore(self, src, progress=False):
    43.1 --- a/xen/include/public/io/netif.h	Thu Sep 22 16:05:44 2005 +0100
    43.2 +++ b/xen/include/public/io/netif.h	Thu Sep 22 16:12:14 2005 +0100
    43.3 @@ -10,10 +10,11 @@
    43.4  #define __XEN_PUBLIC_IO_NETIF_H__
    43.5  
    43.6  typedef struct netif_tx_request {
    43.7 -    unsigned long addr;   /* Machine address of packet.  */
    43.8 +    grant_ref_t gref;      /* Reference to buffer page */
    43.9 +    u16      offset:15;    /* Offset within buffer page */
   43.10      u16      csum_blank:1; /* Proto csum field blank?   */
   43.11 -    u16      id:15;  /* Echoed in response message. */
   43.12 -    u16      size;   /* Packet size in bytes.       */
   43.13 +    u16      id;           /* Echoed in response message. */
   43.14 +    u16      size;         /* Packet size in bytes.       */
   43.15  } netif_tx_request_t;
   43.16  
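With the move to grant tables the transmit descriptor names a granted page
(gref, offset) instead of a raw machine address.  A hedged sketch of a
frontend filling one slot; gnttab_grant_foreign_access() and the ring
variables are assumptions from the surrounding tree, not defined here:

    netif_tx_request_t *tx = &tx_ring[MASK_NETIF_TX_IDX(req_prod)];

    tx->gref       = gnttab_grant_foreign_access(backend_id, mfn, 1 /* RO */);
    tx->offset     = pkt_offset;  /* start of packet within the granted page */
    tx->csum_blank = 0;
    tx->id         = slot;        /* echoed back in netif_tx_response_t      */
    tx->size       = pkt_len;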
   43.17  typedef struct netif_tx_response {
   43.18 @@ -22,21 +23,15 @@ typedef struct netif_tx_response {
   43.19  } netif_tx_response_t;
   43.20  
   43.21  typedef struct {
   43.22 -    u16       id;    /* Echoed in response message.        */
   43.23 -#ifdef CONFIG_XEN_NETDEV_GRANT
   43.24 -    grant_ref_t gref;	/* 2: Reference to incoming granted frame */
   43.25 -#endif
   43.26 +    u16       id;       /* Echoed in response message.        */
   43.27 +    grant_ref_t gref;   /* Reference to incoming granted frame */
   43.28  } netif_rx_request_t;
   43.29  
   43.30  typedef struct {
   43.31 -#ifdef CONFIG_XEN_NETDEV_GRANT
   43.32 -    u32      addr;   /*  0: Offset in page of start of received packet  */
   43.33 -#else
   43.34 -    unsigned long addr; /* Machine address of packet.              */
   43.35 -#endif
   43.36 -    u16      csum_valid:1; /* Protocol checksum is validated?       */
   43.37 -    u16      id:15;
   43.38 -    s16      status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
   43.39 +    u16      offset;     /* Offset in page of start of received packet  */
   43.40 +    u16      csum_valid; /* Protocol checksum is validated?       */
   43.41 +    u16      id;
   43.42 +    s16      status;     /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
   43.43  } netif_rx_response_t;
   43.44  
   43.45  /*
   43.46 @@ -53,18 +48,8 @@ typedef u32 NETIF_RING_IDX;
   43.47  #define MASK_NETIF_RX_IDX(_i) ((_i)&(NETIF_RX_RING_SIZE-1))
   43.48  #define MASK_NETIF_TX_IDX(_i) ((_i)&(NETIF_TX_RING_SIZE-1))
   43.49  
   43.50 -#ifdef __x86_64__
   43.51 -/*
   43.52 - * This restriction can be lifted when we move netfront/netback to use
   43.53 - * grant tables. This will remove memory_t fields from the above structures
   43.54 - * and thus relax natural alignment restrictions.
   43.55 - */
   43.56 -#define NETIF_TX_RING_SIZE 128
   43.57 -#define NETIF_RX_RING_SIZE 128
   43.58 -#else
   43.59  #define NETIF_TX_RING_SIZE 256
   43.60  #define NETIF_RX_RING_SIZE 256
   43.61 -#endif
   43.62  
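Both ring sizes are powers of two, so the MASK_NETIF_*_IDX macros above reduce
the free-running NETIF_RING_IDX counters to slot numbers with a single AND,
e.g. (ring and index names illustrative):

    NETIF_RING_IDX i = 260;                  /* indexes never wrap...          */
    netif_rx_response_t *resp =
            &rx_ring[MASK_NETIF_RX_IDX(i)];  /* ...the mask does: 260&255 == 4 */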
   43.63  /* This structure must fit in a memory page. */
   43.64  typedef struct netif_tx_interface {