ia64/xen-unstable
changeset 7022:10759a44ce3b
Merged.
line diff
1.1 --- a/linux-2.6-xen-sparse/arch/xen/Kconfig Thu Sep 22 16:05:44 2005 +0100 1.2 +++ b/linux-2.6-xen-sparse/arch/xen/Kconfig Thu Sep 22 16:12:14 2005 +0100 1.3 @@ -111,13 +111,6 @@ config XEN_NETDEV_FRONTEND 1.4 dedicated device-driver domain, or your master control domain 1.5 (domain 0), then you almost certainly want to say Y here. 1.6 1.7 -config XEN_NETDEV_GRANT 1.8 - bool "Grant table substrate for network drivers (DANGEROUS)" 1.9 - default n 1.10 - help 1.11 - This introduces the use of grant tables as a data exhange mechanism 1.12 - between the frontend and backend network drivers. 1.13 - 1.14 config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER 1.15 bool "Pipelined transmitter (DANGEROUS)" 1.16 depends on XEN_NETDEV_FRONTEND
2.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 Thu Sep 22 16:05:44 2005 +0100 2.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 Thu Sep 22 16:12:14 2005 +0100 2.3 @@ -19,7 +19,6 @@ CONFIG_XEN_NETDEV_BACKEND=y 2.4 # CONFIG_XEN_TPMDEV_BACKEND is not set 2.5 CONFIG_XEN_BLKDEV_FRONTEND=y 2.6 CONFIG_XEN_NETDEV_FRONTEND=y 2.7 -CONFIG_XEN_NETDEV_GRANT=y 2.8 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set 2.9 # CONFIG_XEN_BLKDEV_TAP is not set 2.10 # CONFIG_XEN_SHADOW_MODE is not set
3.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 Thu Sep 22 16:05:44 2005 +0100 3.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 Thu Sep 22 16:12:14 2005 +0100 3.3 @@ -19,7 +19,6 @@ CONFIG_XEN_NETDEV_BACKEND=y 3.4 # CONFIG_XEN_TPMDEV_BACKEND is not set 3.5 CONFIG_XEN_BLKDEV_FRONTEND=y 3.6 CONFIG_XEN_NETDEV_FRONTEND=y 3.7 -CONFIG_XEN_NETDEV_GRANT=y 3.8 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set 3.9 # CONFIG_XEN_BLKDEV_TAP is not set 3.10 # CONFIG_XEN_SHADOW_MODE is not set
4.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 Thu Sep 22 16:05:44 2005 +0100 4.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 Thu Sep 22 16:12:14 2005 +0100 4.3 @@ -16,7 +16,6 @@ CONFIG_NO_IDLE_HZ=y 4.4 # CONFIG_XEN_TPMDEV_BACKEND is not set 4.5 CONFIG_XEN_BLKDEV_FRONTEND=y 4.6 CONFIG_XEN_NETDEV_FRONTEND=y 4.7 -CONFIG_XEN_NETDEV_GRANT=y 4.8 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set 4.9 # CONFIG_XEN_BLKDEV_TAP is not set 4.10 # CONFIG_XEN_SHADOW_MODE is not set
5.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 Thu Sep 22 16:05:44 2005 +0100 5.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 Thu Sep 22 16:12:14 2005 +0100 5.3 @@ -16,7 +16,6 @@ CONFIG_NO_IDLE_HZ=y 5.4 # CONFIG_XEN_TPMDEV_BACKEND is not set 5.5 CONFIG_XEN_BLKDEV_FRONTEND=y 5.6 CONFIG_XEN_NETDEV_FRONTEND=y 5.7 -CONFIG_XEN_NETDEV_GRANT=y 5.8 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set 5.9 # CONFIG_XEN_BLKDEV_TAP is not set 5.10 # CONFIG_XEN_SHADOW_MODE is not set
6.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32 Thu Sep 22 16:05:44 2005 +0100 6.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32 Thu Sep 22 16:12:14 2005 +0100 6.3 @@ -19,7 +19,6 @@ CONFIG_XEN_NETDEV_BACKEND=y 6.4 # CONFIG_XEN_TPMDEV_BACKEND is not set 6.5 CONFIG_XEN_BLKDEV_FRONTEND=y 6.6 CONFIG_XEN_NETDEV_FRONTEND=y 6.7 -CONFIG_XEN_NETDEV_GRANT=y 6.8 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set 6.9 # CONFIG_XEN_BLKDEV_TAP is not set 6.10 # CONFIG_XEN_SHADOW_MODE is not set
7.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64 Thu Sep 22 16:05:44 2005 +0100 7.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64 Thu Sep 22 16:12:14 2005 +0100 7.3 @@ -19,7 +19,6 @@ CONFIG_XEN_NETDEV_BACKEND=y 7.4 # CONFIG_XEN_TPMDEV_BACKEND is not set 7.5 CONFIG_XEN_BLKDEV_FRONTEND=y 7.6 CONFIG_XEN_NETDEV_FRONTEND=y 7.7 -CONFIG_XEN_NETDEV_GRANT=y 7.8 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set 7.9 # CONFIG_XEN_BLKDEV_TAP is not set 7.10 # CONFIG_XEN_SHADOW_MODE is not set
8.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c Thu Sep 22 16:05:44 2005 +0100 8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c Thu Sep 22 16:12:14 2005 +0100 8.3 @@ -28,12 +28,12 @@ 8.4 #define BATCH_PER_DOMAIN 16 8.5 8.6 static unsigned long mmap_vstart; 8.7 -#define MMAP_PAGES \ 8.8 - (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST) 8.9 -#define MMAP_VADDR(_req,_seg) \ 8.10 - (mmap_vstart + \ 8.11 - ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \ 8.12 - ((_seg) * PAGE_SIZE)) 8.13 +#define MMAP_PAGES \ 8.14 + (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST) 8.15 +#define MMAP_VADDR(_req,_seg) \ 8.16 + (mmap_vstart + \ 8.17 + ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \ 8.18 + ((_seg) * PAGE_SIZE)) 8.19 8.20 /* 8.21 * Each outstanding request that we've passed to the lower device layers has a 8.22 @@ -42,12 +42,12 @@ static unsigned long mmap_vstart; 8.23 * response queued for it, with the saved 'id' passed back. 8.24 */ 8.25 typedef struct { 8.26 - blkif_t *blkif; 8.27 - unsigned long id; 8.28 - int nr_pages; 8.29 - atomic_t pendcnt; 8.30 - unsigned short operation; 8.31 - int status; 8.32 + blkif_t *blkif; 8.33 + unsigned long id; 8.34 + int nr_pages; 8.35 + atomic_t pendcnt; 8.36 + unsigned short operation; 8.37 + int status; 8.38 } pending_req_t; 8.39 8.40 /* 8.41 @@ -68,14 +68,13 @@ static PEND_RING_IDX pending_prod, pendi 8.42 static request_queue_t *plugged_queue; 8.43 static inline void flush_plugged_queue(void) 8.44 { 8.45 - request_queue_t *q = plugged_queue; 8.46 - if ( q != NULL ) 8.47 - { 8.48 - if ( q->unplug_fn != NULL ) 8.49 - q->unplug_fn(q); 8.50 - blk_put_queue(q); 8.51 - plugged_queue = NULL; 8.52 - } 8.53 + request_queue_t *q = plugged_queue; 8.54 + if (q != NULL) { 8.55 + if ( q->unplug_fn != NULL ) 8.56 + q->unplug_fn(q); 8.57 + blk_put_queue(q); 8.58 + plugged_queue = NULL; 8.59 + } 8.60 } 8.61 8.62 /* When using grant tables to map a frame for device access then the 8.63 @@ -106,24 +105,23 @@ static void make_response(blkif_t *blkif 8.64 8.65 static void fast_flush_area(int idx, int nr_pages) 8.66 { 8.67 - struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 8.68 - unsigned int i, invcount = 0; 8.69 - u16 handle; 8.70 + struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 8.71 + unsigned int i, invcount = 0; 8.72 + u16 handle; 8.73 8.74 - for ( i = 0; i < nr_pages; i++ ) 8.75 - { 8.76 - if ( BLKBACK_INVALID_HANDLE != ( handle = pending_handle(idx, i) ) ) 8.77 - { 8.78 - unmap[i].host_addr = MMAP_VADDR(idx, i); 8.79 - unmap[i].dev_bus_addr = 0; 8.80 - unmap[i].handle = handle; 8.81 - pending_handle(idx, i) = BLKBACK_INVALID_HANDLE; 8.82 - invcount++; 8.83 - } 8.84 - } 8.85 - if ( unlikely(HYPERVISOR_grant_table_op( 8.86 - GNTTABOP_unmap_grant_ref, unmap, invcount))) 8.87 - BUG(); 8.88 + for (i = 0; i < nr_pages; i++) { 8.89 + handle = pending_handle(idx, i); 8.90 + if (handle == BLKBACK_INVALID_HANDLE) 8.91 + continue; 8.92 + unmap[i].host_addr = MMAP_VADDR(idx, i); 8.93 + unmap[i].dev_bus_addr = 0; 8.94 + unmap[i].handle = handle; 8.95 + pending_handle(idx, i) = BLKBACK_INVALID_HANDLE; 8.96 + invcount++; 8.97 + } 8.98 + 8.99 + BUG_ON(HYPERVISOR_grant_table_op( 8.100 + GNTTABOP_unmap_grant_ref, unmap, invcount)); 8.101 } 8.102 8.103 8.104 @@ -136,34 +134,38 @@ static spinlock_t blkio_schedule_list_lo 8.105 8.106 static int __on_blkdev_list(blkif_t *blkif) 8.107 { 8.108 - return blkif->blkdev_list.next != NULL; 8.109 + return blkif->blkdev_list.next != NULL; 8.110 } 8.111 8.112 
static void remove_from_blkdev_list(blkif_t *blkif) 8.113 { 8.114 - unsigned long flags; 8.115 - if ( !__on_blkdev_list(blkif) ) return; 8.116 - spin_lock_irqsave(&blkio_schedule_list_lock, flags); 8.117 - if ( __on_blkdev_list(blkif) ) 8.118 - { 8.119 - list_del(&blkif->blkdev_list); 8.120 - blkif->blkdev_list.next = NULL; 8.121 - blkif_put(blkif); 8.122 - } 8.123 - spin_unlock_irqrestore(&blkio_schedule_list_lock, flags); 8.124 + unsigned long flags; 8.125 + 8.126 + if (!__on_blkdev_list(blkif)) 8.127 + return; 8.128 + 8.129 + spin_lock_irqsave(&blkio_schedule_list_lock, flags); 8.130 + if (__on_blkdev_list(blkif)) { 8.131 + list_del(&blkif->blkdev_list); 8.132 + blkif->blkdev_list.next = NULL; 8.133 + blkif_put(blkif); 8.134 + } 8.135 + spin_unlock_irqrestore(&blkio_schedule_list_lock, flags); 8.136 } 8.137 8.138 static void add_to_blkdev_list_tail(blkif_t *blkif) 8.139 { 8.140 - unsigned long flags; 8.141 - if ( __on_blkdev_list(blkif) ) return; 8.142 - spin_lock_irqsave(&blkio_schedule_list_lock, flags); 8.143 - if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) ) 8.144 - { 8.145 - list_add_tail(&blkif->blkdev_list, &blkio_schedule_list); 8.146 - blkif_get(blkif); 8.147 - } 8.148 - spin_unlock_irqrestore(&blkio_schedule_list_lock, flags); 8.149 + unsigned long flags; 8.150 + 8.151 + if (__on_blkdev_list(blkif)) 8.152 + return; 8.153 + 8.154 + spin_lock_irqsave(&blkio_schedule_list_lock, flags); 8.155 + if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) { 8.156 + list_add_tail(&blkif->blkdev_list, &blkio_schedule_list); 8.157 + blkif_get(blkif); 8.158 + } 8.159 + spin_unlock_irqrestore(&blkio_schedule_list_lock, flags); 8.160 } 8.161 8.162 8.163 @@ -175,54 +177,53 @@ static DECLARE_WAIT_QUEUE_HEAD(blkio_sch 8.164 8.165 static int blkio_schedule(void *arg) 8.166 { 8.167 - DECLARE_WAITQUEUE(wq, current); 8.168 + DECLARE_WAITQUEUE(wq, current); 8.169 8.170 - blkif_t *blkif; 8.171 - struct list_head *ent; 8.172 + blkif_t *blkif; 8.173 + struct list_head *ent; 8.174 8.175 - daemonize("xenblkd"); 8.176 + daemonize("xenblkd"); 8.177 8.178 - for ( ; ; ) 8.179 - { 8.180 - /* Wait for work to do. */ 8.181 - add_wait_queue(&blkio_schedule_wait, &wq); 8.182 - set_current_state(TASK_INTERRUPTIBLE); 8.183 - if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 8.184 - list_empty(&blkio_schedule_list) ) 8.185 - schedule(); 8.186 - __set_current_state(TASK_RUNNING); 8.187 - remove_wait_queue(&blkio_schedule_wait, &wq); 8.188 + for (;;) { 8.189 + /* Wait for work to do. */ 8.190 + add_wait_queue(&blkio_schedule_wait, &wq); 8.191 + set_current_state(TASK_INTERRUPTIBLE); 8.192 + if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 8.193 + list_empty(&blkio_schedule_list) ) 8.194 + schedule(); 8.195 + __set_current_state(TASK_RUNNING); 8.196 + remove_wait_queue(&blkio_schedule_wait, &wq); 8.197 8.198 - /* Queue up a batch of requests. */ 8.199 - while ( (NR_PENDING_REQS < MAX_PENDING_REQS) && 8.200 - !list_empty(&blkio_schedule_list) ) 8.201 - { 8.202 - ent = blkio_schedule_list.next; 8.203 - blkif = list_entry(ent, blkif_t, blkdev_list); 8.204 - blkif_get(blkif); 8.205 - remove_from_blkdev_list(blkif); 8.206 - if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) ) 8.207 - add_to_blkdev_list_tail(blkif); 8.208 - blkif_put(blkif); 8.209 - } 8.210 + /* Queue up a batch of requests. 
*/ 8.211 + while ((NR_PENDING_REQS < MAX_PENDING_REQS) && 8.212 + !list_empty(&blkio_schedule_list)) { 8.213 + ent = blkio_schedule_list.next; 8.214 + blkif = list_entry(ent, blkif_t, blkdev_list); 8.215 + blkif_get(blkif); 8.216 + remove_from_blkdev_list(blkif); 8.217 + if (do_block_io_op(blkif, BATCH_PER_DOMAIN)) 8.218 + add_to_blkdev_list_tail(blkif); 8.219 + blkif_put(blkif); 8.220 + } 8.221 8.222 - /* Push the batch through to disc. */ 8.223 - flush_plugged_queue(); 8.224 - } 8.225 + /* Push the batch through to disc. */ 8.226 + flush_plugged_queue(); 8.227 + } 8.228 } 8.229 8.230 static void maybe_trigger_blkio_schedule(void) 8.231 { 8.232 - /* 8.233 - * Needed so that two processes, who together make the following predicate 8.234 - * true, don't both read stale values and evaluate the predicate 8.235 - * incorrectly. Incredibly unlikely to stall the scheduler on x86, but... 8.236 - */ 8.237 - smp_mb(); 8.238 + /* 8.239 + * Needed so that two processes, which together make the following 8.240 + * predicate true, don't both read stale values and evaluate the 8.241 + * predicate incorrectly. Incredibly unlikely to stall the scheduler 8.242 + * on x86, but... 8.243 + */ 8.244 + smp_mb(); 8.245 8.246 - if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) && 8.247 - !list_empty(&blkio_schedule_list) ) 8.248 - wake_up(&blkio_schedule_wait); 8.249 + if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) && 8.250 + !list_empty(&blkio_schedule_list)) 8.251 + wake_up(&blkio_schedule_wait); 8.252 } 8.253 8.254 8.255 @@ -233,36 +234,34 @@ static void maybe_trigger_blkio_schedule 8.256 8.257 static void __end_block_io_op(pending_req_t *pending_req, int uptodate) 8.258 { 8.259 - unsigned long flags; 8.260 + unsigned long flags; 8.261 8.262 - /* An error fails the entire request. */ 8.263 - if ( !uptodate ) 8.264 - { 8.265 - DPRINTK("Buffer not up-to-date at end of operation\n"); 8.266 - pending_req->status = BLKIF_RSP_ERROR; 8.267 - } 8.268 + /* An error fails the entire request. 
*/ 8.269 + if (!uptodate) { 8.270 + DPRINTK("Buffer not up-to-date at end of operation\n"); 8.271 + pending_req->status = BLKIF_RSP_ERROR; 8.272 + } 8.273 8.274 - if ( atomic_dec_and_test(&pending_req->pendcnt) ) 8.275 - { 8.276 - int pending_idx = pending_req - pending_reqs; 8.277 - fast_flush_area(pending_idx, pending_req->nr_pages); 8.278 - make_response(pending_req->blkif, pending_req->id, 8.279 - pending_req->operation, pending_req->status); 8.280 - blkif_put(pending_req->blkif); 8.281 - spin_lock_irqsave(&pend_prod_lock, flags); 8.282 - pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; 8.283 - spin_unlock_irqrestore(&pend_prod_lock, flags); 8.284 - maybe_trigger_blkio_schedule(); 8.285 - } 8.286 + if (atomic_dec_and_test(&pending_req->pendcnt)) { 8.287 + int pending_idx = pending_req - pending_reqs; 8.288 + fast_flush_area(pending_idx, pending_req->nr_pages); 8.289 + make_response(pending_req->blkif, pending_req->id, 8.290 + pending_req->operation, pending_req->status); 8.291 + blkif_put(pending_req->blkif); 8.292 + spin_lock_irqsave(&pend_prod_lock, flags); 8.293 + pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; 8.294 + spin_unlock_irqrestore(&pend_prod_lock, flags); 8.295 + maybe_trigger_blkio_schedule(); 8.296 + } 8.297 } 8.298 8.299 static int end_block_io_op(struct bio *bio, unsigned int done, int error) 8.300 { 8.301 - if ( bio->bi_size != 0 ) 8.302 - return 1; 8.303 - __end_block_io_op(bio->bi_private, !error); 8.304 - bio_put(bio); 8.305 - return error; 8.306 + if (bio->bi_size != 0) 8.307 + return 1; 8.308 + __end_block_io_op(bio->bi_private, !error); 8.309 + bio_put(bio); 8.310 + return error; 8.311 } 8.312 8.313 8.314 @@ -272,10 +271,10 @@ static int end_block_io_op(struct bio *b 8.315 8.316 irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs) 8.317 { 8.318 - blkif_t *blkif = dev_id; 8.319 - add_to_blkdev_list_tail(blkif); 8.320 - maybe_trigger_blkio_schedule(); 8.321 - return IRQ_HANDLED; 8.322 + blkif_t *blkif = dev_id; 8.323 + add_to_blkdev_list_tail(blkif); 8.324 + maybe_trigger_blkio_schedule(); 8.325 + return IRQ_HANDLED; 8.326 } 8.327 8.328 8.329 @@ -286,183 +285,174 @@ irqreturn_t blkif_be_int(int irq, void * 8.330 8.331 static int do_block_io_op(blkif_t *blkif, int max_to_do) 8.332 { 8.333 - blkif_back_ring_t *blk_ring = &blkif->blk_ring; 8.334 - blkif_request_t *req; 8.335 - RING_IDX i, rp; 8.336 - int more_to_do = 0; 8.337 + blkif_back_ring_t *blk_ring = &blkif->blk_ring; 8.338 + blkif_request_t *req; 8.339 + RING_IDX i, rp; 8.340 + int more_to_do = 0; 8.341 8.342 - rp = blk_ring->sring->req_prod; 8.343 - rmb(); /* Ensure we see queued requests up to 'rp'. */ 8.344 + rp = blk_ring->sring->req_prod; 8.345 + rmb(); /* Ensure we see queued requests up to 'rp'. 
*/ 8.346 8.347 - for ( i = blk_ring->req_cons; 8.348 - (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i); 8.349 - i++ ) 8.350 - { 8.351 - if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) ) 8.352 - { 8.353 - more_to_do = 1; 8.354 - break; 8.355 - } 8.356 + for (i = blk_ring->req_cons; 8.357 + (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i); 8.358 + i++) { 8.359 + if ((max_to_do-- == 0) || 8.360 + (NR_PENDING_REQS == MAX_PENDING_REQS)) { 8.361 + more_to_do = 1; 8.362 + break; 8.363 + } 8.364 8.365 - req = RING_GET_REQUEST(blk_ring, i); 8.366 - switch ( req->operation ) 8.367 - { 8.368 - case BLKIF_OP_READ: 8.369 - case BLKIF_OP_WRITE: 8.370 - dispatch_rw_block_io(blkif, req); 8.371 - break; 8.372 + req = RING_GET_REQUEST(blk_ring, i); 8.373 + switch (req->operation) { 8.374 + case BLKIF_OP_READ: 8.375 + case BLKIF_OP_WRITE: 8.376 + dispatch_rw_block_io(blkif, req); 8.377 + break; 8.378 8.379 - default: 8.380 - DPRINTK("error: unknown block io operation [%d]\n", 8.381 - req->operation); 8.382 - make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR); 8.383 - break; 8.384 - } 8.385 - } 8.386 + default: 8.387 + DPRINTK("error: unknown block io operation [%d]\n", 8.388 + req->operation); 8.389 + make_response(blkif, req->id, req->operation, 8.390 + BLKIF_RSP_ERROR); 8.391 + break; 8.392 + } 8.393 + } 8.394 8.395 - blk_ring->req_cons = i; 8.396 - return more_to_do; 8.397 + blk_ring->req_cons = i; 8.398 + return more_to_do; 8.399 } 8.400 8.401 static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req) 8.402 { 8.403 - extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 8.404 - int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ; 8.405 - unsigned long fas = 0; 8.406 - int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)]; 8.407 - pending_req_t *pending_req; 8.408 - struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 8.409 - struct phys_req preq; 8.410 - struct { 8.411 - unsigned long buf; unsigned int nsec; 8.412 - } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 8.413 - unsigned int nseg; 8.414 - struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 8.415 - int nbio = 0; 8.416 - request_queue_t *q; 8.417 + extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 8.418 + int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ; 8.419 + unsigned long fas = 0; 8.420 + int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)]; 8.421 + pending_req_t *pending_req; 8.422 + struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 8.423 + struct phys_req preq; 8.424 + struct { 8.425 + unsigned long buf; unsigned int nsec; 8.426 + } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 8.427 + unsigned int nseg; 8.428 + struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 8.429 + int nbio = 0; 8.430 + request_queue_t *q; 8.431 8.432 - /* Check that number of segments is sane. */ 8.433 - nseg = req->nr_segments; 8.434 - if ( unlikely(nseg == 0) || 8.435 - unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) 8.436 - { 8.437 - DPRINTK("Bad number of segments in request (%d)\n", nseg); 8.438 - goto bad_descriptor; 8.439 - } 8.440 + /* Check that number of segments is sane. 
*/ 8.441 + nseg = req->nr_segments; 8.442 + if (unlikely(nseg == 0) || 8.443 + unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { 8.444 + DPRINTK("Bad number of segments in request (%d)\n", nseg); 8.445 + goto bad_descriptor; 8.446 + } 8.447 8.448 - preq.dev = req->handle; 8.449 - preq.sector_number = req->sector_number; 8.450 - preq.nr_sects = 0; 8.451 + preq.dev = req->handle; 8.452 + preq.sector_number = req->sector_number; 8.453 + preq.nr_sects = 0; 8.454 8.455 - for ( i = 0; i < nseg; i++ ) 8.456 - { 8.457 - fas = req->frame_and_sects[i]; 8.458 - seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1; 8.459 + for (i = 0; i < nseg; i++) { 8.460 + fas = req->frame_and_sects[i]; 8.461 + seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1; 8.462 + 8.463 + if (seg[i].nsec <= 0) 8.464 + goto bad_descriptor; 8.465 + preq.nr_sects += seg[i].nsec; 8.466 8.467 - if ( seg[i].nsec <= 0 ) 8.468 - goto bad_descriptor; 8.469 - preq.nr_sects += seg[i].nsec; 8.470 - 8.471 - map[i].host_addr = MMAP_VADDR(pending_idx, i); 8.472 - map[i].dom = blkif->domid; 8.473 - map[i].ref = blkif_gref_from_fas(fas); 8.474 - map[i].flags = GNTMAP_host_map; 8.475 - if ( operation == WRITE ) 8.476 - map[i].flags |= GNTMAP_readonly; 8.477 - } 8.478 + map[i].host_addr = MMAP_VADDR(pending_idx, i); 8.479 + map[i].dom = blkif->domid; 8.480 + map[i].ref = blkif_gref_from_fas(fas); 8.481 + map[i].flags = GNTMAP_host_map; 8.482 + if ( operation == WRITE ) 8.483 + map[i].flags |= GNTMAP_readonly; 8.484 + } 8.485 8.486 - if ( unlikely(HYPERVISOR_grant_table_op( 8.487 - GNTTABOP_map_grant_ref, map, nseg))) 8.488 - BUG(); 8.489 + BUG_ON(HYPERVISOR_grant_table_op( 8.490 + GNTTABOP_map_grant_ref, map, nseg)); 8.491 8.492 - for ( i = 0; i < nseg; i++ ) 8.493 - { 8.494 - if ( unlikely(map[i].handle < 0) ) 8.495 - { 8.496 - DPRINTK("invalid buffer -- could not remap it\n"); 8.497 - fast_flush_area(pending_idx, nseg); 8.498 - goto bad_descriptor; 8.499 - } 8.500 + for (i = 0; i < nseg; i++) { 8.501 + if (unlikely(map[i].handle < 0)) { 8.502 + DPRINTK("invalid buffer -- could not remap it\n"); 8.503 + fast_flush_area(pending_idx, nseg); 8.504 + goto bad_descriptor; 8.505 + } 8.506 8.507 - phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] = 8.508 - FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT); 8.509 + phys_to_machine_mapping[__pa(MMAP_VADDR( 8.510 + pending_idx, i)) >> PAGE_SHIFT] = 8.511 + FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT); 8.512 8.513 - pending_handle(pending_idx, i) = map[i].handle; 8.514 - } 8.515 + pending_handle(pending_idx, i) = map[i].handle; 8.516 + } 8.517 8.518 - for ( i = 0; i < nseg; i++ ) 8.519 - { 8.520 - fas = req->frame_and_sects[i]; 8.521 - seg[i].buf = map[i].dev_bus_addr | (blkif_first_sect(fas) << 9); 8.522 - } 8.523 + for (i = 0; i < nseg; i++) { 8.524 + fas = req->frame_and_sects[i]; 8.525 + seg[i].buf = map[i].dev_bus_addr | 8.526 + (blkif_first_sect(fas) << 9); 8.527 + } 8.528 8.529 - if ( vbd_translate(&preq, blkif, operation) != 0 ) 8.530 - { 8.531 - DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 8.532 - operation == READ ? "read" : "write", preq.sector_number, 8.533 - preq.sector_number + preq.nr_sects, preq.dev); 8.534 - goto bad_descriptor; 8.535 - } 8.536 + if (vbd_translate(&preq, blkif, operation) != 0) { 8.537 + DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 8.538 + operation == READ ? 
"read" : "write", 8.539 + preq.sector_number, 8.540 + preq.sector_number + preq.nr_sects, preq.dev); 8.541 + goto bad_descriptor; 8.542 + } 8.543 8.544 - pending_req = &pending_reqs[pending_idx]; 8.545 - pending_req->blkif = blkif; 8.546 - pending_req->id = req->id; 8.547 - pending_req->operation = operation; 8.548 - pending_req->status = BLKIF_RSP_OKAY; 8.549 - pending_req->nr_pages = nseg; 8.550 + pending_req = &pending_reqs[pending_idx]; 8.551 + pending_req->blkif = blkif; 8.552 + pending_req->id = req->id; 8.553 + pending_req->operation = operation; 8.554 + pending_req->status = BLKIF_RSP_OKAY; 8.555 + pending_req->nr_pages = nseg; 8.556 8.557 - for ( i = 0; i < nseg; i++ ) 8.558 - { 8.559 - if ( ((int)preq.sector_number|(int)seg[i].nsec) & 8.560 - ((bdev_hardsect_size(preq.bdev) >> 9) - 1) ) 8.561 - { 8.562 - DPRINTK("Misaligned I/O request from domain %d", blkif->domid); 8.563 - goto cleanup_and_fail; 8.564 - } 8.565 + for (i = 0; i < nseg; i++) { 8.566 + if (((int)preq.sector_number|(int)seg[i].nsec) & 8.567 + ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) { 8.568 + DPRINTK("Misaligned I/O request from domain %d", 8.569 + blkif->domid); 8.570 + goto cleanup_and_fail; 8.571 + } 8.572 8.573 - while ( (bio == NULL) || 8.574 - (bio_add_page(bio, 8.575 - virt_to_page(MMAP_VADDR(pending_idx, i)), 8.576 - seg[i].nsec << 9, 8.577 - seg[i].buf & ~PAGE_MASK) == 0) ) 8.578 - { 8.579 - bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i); 8.580 - if ( unlikely(bio == NULL) ) 8.581 - { 8.582 - cleanup_and_fail: 8.583 - for ( i = 0; i < (nbio-1); i++ ) 8.584 - bio_put(biolist[i]); 8.585 - fast_flush_area(pending_idx, nseg); 8.586 - goto bad_descriptor; 8.587 - } 8.588 + while ((bio == NULL) || 8.589 + (bio_add_page(bio, 8.590 + virt_to_page(MMAP_VADDR(pending_idx, i)), 8.591 + seg[i].nsec << 9, 8.592 + seg[i].buf & ~PAGE_MASK) == 0)) { 8.593 + bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i); 8.594 + if (unlikely(bio == NULL)) { 8.595 + cleanup_and_fail: 8.596 + for (i = 0; i < (nbio-1); i++) 8.597 + bio_put(biolist[i]); 8.598 + fast_flush_area(pending_idx, nseg); 8.599 + goto bad_descriptor; 8.600 + } 8.601 8.602 - bio->bi_bdev = preq.bdev; 8.603 - bio->bi_private = pending_req; 8.604 - bio->bi_end_io = end_block_io_op; 8.605 - bio->bi_sector = preq.sector_number; 8.606 - } 8.607 + bio->bi_bdev = preq.bdev; 8.608 + bio->bi_private = pending_req; 8.609 + bio->bi_end_io = end_block_io_op; 8.610 + bio->bi_sector = preq.sector_number; 8.611 + } 8.612 8.613 - preq.sector_number += seg[i].nsec; 8.614 - } 8.615 + preq.sector_number += seg[i].nsec; 8.616 + } 8.617 8.618 - if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue ) 8.619 - { 8.620 - flush_plugged_queue(); 8.621 - blk_get_queue(q); 8.622 - plugged_queue = q; 8.623 - } 8.624 + if ((q = bdev_get_queue(bio->bi_bdev)) != plugged_queue) { 8.625 + flush_plugged_queue(); 8.626 + blk_get_queue(q); 8.627 + plugged_queue = q; 8.628 + } 8.629 8.630 - atomic_set(&pending_req->pendcnt, nbio); 8.631 - pending_cons++; 8.632 - blkif_get(blkif); 8.633 + atomic_set(&pending_req->pendcnt, nbio); 8.634 + pending_cons++; 8.635 + blkif_get(blkif); 8.636 8.637 - for ( i = 0; i < nbio; i++ ) 8.638 - submit_bio(operation, biolist[i]); 8.639 + for (i = 0; i < nbio; i++) 8.640 + submit_bio(operation, biolist[i]); 8.641 8.642 - return; 8.643 + return; 8.644 8.645 bad_descriptor: 8.646 - make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR); 8.647 + make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR); 8.648 } 8.649 8.650 8.651 @@ -475,66 +465,71 
@@ static void dispatch_rw_block_io(blkif_t 8.652 static void make_response(blkif_t *blkif, unsigned long id, 8.653 unsigned short op, int st) 8.654 { 8.655 - blkif_response_t *resp; 8.656 - unsigned long flags; 8.657 - blkif_back_ring_t *blk_ring = &blkif->blk_ring; 8.658 + blkif_response_t *resp; 8.659 + unsigned long flags; 8.660 + blkif_back_ring_t *blk_ring = &blkif->blk_ring; 8.661 8.662 - /* Place on the response ring for the relevant domain. */ 8.663 - spin_lock_irqsave(&blkif->blk_ring_lock, flags); 8.664 - resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt); 8.665 - resp->id = id; 8.666 - resp->operation = op; 8.667 - resp->status = st; 8.668 - wmb(); /* Ensure other side can see the response fields. */ 8.669 - blk_ring->rsp_prod_pvt++; 8.670 - RING_PUSH_RESPONSES(blk_ring); 8.671 - spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); 8.672 + /* Place on the response ring for the relevant domain. */ 8.673 + spin_lock_irqsave(&blkif->blk_ring_lock, flags); 8.674 + resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt); 8.675 + resp->id = id; 8.676 + resp->operation = op; 8.677 + resp->status = st; 8.678 + wmb(); /* Ensure other side can see the response fields. */ 8.679 + blk_ring->rsp_prod_pvt++; 8.680 + RING_PUSH_RESPONSES(blk_ring); 8.681 + spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); 8.682 8.683 - /* Kick the relevant domain. */ 8.684 - notify_via_evtchn(blkif->evtchn); 8.685 + /* Kick the relevant domain. */ 8.686 + notify_via_evtchn(blkif->evtchn); 8.687 } 8.688 8.689 void blkif_deschedule(blkif_t *blkif) 8.690 { 8.691 - remove_from_blkdev_list(blkif); 8.692 + remove_from_blkdev_list(blkif); 8.693 } 8.694 8.695 static int __init blkif_init(void) 8.696 { 8.697 - int i; 8.698 - struct page *page; 8.699 + int i; 8.700 + struct page *page; 8.701 8.702 - if ( !(xen_start_info->flags & SIF_INITDOMAIN) && 8.703 - !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) ) 8.704 - return 0; 8.705 + if (!(xen_start_info->flags & SIF_INITDOMAIN) && 8.706 + !(xen_start_info->flags & SIF_BLK_BE_DOMAIN)) 8.707 + return 0; 8.708 8.709 - blkif_interface_init(); 8.710 + blkif_interface_init(); 8.711 8.712 - page = balloon_alloc_empty_page_range(MMAP_PAGES); 8.713 - BUG_ON(page == NULL); 8.714 - mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); 8.715 + page = balloon_alloc_empty_page_range(MMAP_PAGES); 8.716 + BUG_ON(page == NULL); 8.717 + mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); 8.718 8.719 - pending_cons = 0; 8.720 - pending_prod = MAX_PENDING_REQS; 8.721 - memset(pending_reqs, 0, sizeof(pending_reqs)); 8.722 - for ( i = 0; i < MAX_PENDING_REQS; i++ ) 8.723 - pending_ring[i] = i; 8.724 + pending_cons = 0; 8.725 + pending_prod = MAX_PENDING_REQS; 8.726 + memset(pending_reqs, 0, sizeof(pending_reqs)); 8.727 + for (i = 0; i < MAX_PENDING_REQS; i++) 8.728 + pending_ring[i] = i; 8.729 8.730 - spin_lock_init(&blkio_schedule_list_lock); 8.731 - INIT_LIST_HEAD(&blkio_schedule_list); 8.732 + spin_lock_init(&blkio_schedule_list_lock); 8.733 + INIT_LIST_HEAD(&blkio_schedule_list); 8.734 8.735 - if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 ) 8.736 - BUG(); 8.737 + BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0); 8.738 8.739 - blkif_xenbus_init(); 8.740 + blkif_xenbus_init(); 8.741 8.742 - memset( pending_grant_handles, BLKBACK_INVALID_HANDLE, MMAP_PAGES ); 8.743 + memset(pending_grant_handles, BLKBACK_INVALID_HANDLE, MMAP_PAGES); 8.744 8.745 -#ifdef CONFIG_XEN_BLKDEV_TAP_BE 8.746 - printk(KERN_ALERT "NOTE: Blkif backend 
is running with tap support on!\n"); 8.747 -#endif 8.748 - 8.749 - return 0; 8.750 + return 0; 8.751 } 8.752 8.753 __initcall(blkif_init); 8.754 + 8.755 +/* 8.756 + * Local variables: 8.757 + * c-file-style: "linux" 8.758 + * indent-tabs-mode: t 8.759 + * c-indent-level: 8 8.760 + * c-basic-offset: 8 8.761 + * tab-width: 8 8.762 + * End: 8.763 + */
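Beyond re-indenting blkback.c to the Linux kernel style (tabs, 8-column indents, braces cuddled onto the if), the hunks above fold open-coded failure checks of the form "if ( unlikely(HYPERVISOR_grant_table_op(...)) ) BUG();" into BUG_ON(). A minimal sketch of that conversion in isolation, with a hypothetical must_succeed() standing in for the hypercall:

#include <linux/kernel.h>
#include <linux/compiler.h>

extern int must_succeed(void);	/* stand-in only; not a real Xen or kernel API */

static void old_form(void)
{
	if (unlikely(must_succeed() != 0))
		BUG();
}

static void new_form(void)
{
	/* BUG_ON() always evaluates its argument, so the call still runs
	 * exactly once; only the bookkeeping around it gets shorter. */
	BUG_ON(must_succeed() != 0);
}

The two forms are equivalent; BUG_ON() is simply the idiomatic spelling in the style this changeset adopts.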
9.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Thu Sep 22 16:05:44 2005 +0100 9.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h Thu Sep 22 16:12:14 2005 +0100 9.3 @@ -31,39 +31,39 @@ 9.4 #endif 9.5 9.6 struct vbd { 9.7 - blkif_vdev_t handle; /* what the domain refers to this vbd as */ 9.8 - unsigned char readonly; /* Non-zero -> read-only */ 9.9 - unsigned char type; /* VDISK_xxx */ 9.10 - u32 pdevice; /* phys device that this vbd maps to */ 9.11 - struct block_device *bdev; 9.12 + blkif_vdev_t handle; /* what the domain refers to this vbd as */ 9.13 + unsigned char readonly; /* Non-zero -> read-only */ 9.14 + unsigned char type; /* VDISK_xxx */ 9.15 + u32 pdevice; /* phys device that this vbd maps to */ 9.16 + struct block_device *bdev; 9.17 }; 9.18 9.19 typedef struct blkif_st { 9.20 - /* Unique identifier for this interface. */ 9.21 - domid_t domid; 9.22 - unsigned int handle; 9.23 - /* Physical parameters of the comms window. */ 9.24 - unsigned int evtchn; 9.25 - unsigned int remote_evtchn; 9.26 - /* Comms information. */ 9.27 - blkif_back_ring_t blk_ring; 9.28 - struct vm_struct *blk_ring_area; 9.29 - /* VBDs attached to this interface. */ 9.30 - struct vbd vbd; 9.31 - /* Private fields. */ 9.32 - enum { DISCONNECTED, CONNECTED } status; 9.33 + /* Unique identifier for this interface. */ 9.34 + domid_t domid; 9.35 + unsigned int handle; 9.36 + /* Physical parameters of the comms window. */ 9.37 + unsigned int evtchn; 9.38 + unsigned int remote_evtchn; 9.39 + /* Comms information. */ 9.40 + blkif_back_ring_t blk_ring; 9.41 + struct vm_struct *blk_ring_area; 9.42 + /* VBDs attached to this interface. */ 9.43 + struct vbd vbd; 9.44 + /* Private fields. */ 9.45 + enum { DISCONNECTED, CONNECTED } status; 9.46 #ifdef CONFIG_XEN_BLKDEV_TAP_BE 9.47 - /* Is this a blktap frontend */ 9.48 - unsigned int is_blktap; 9.49 + /* Is this a blktap frontend */ 9.50 + unsigned int is_blktap; 9.51 #endif 9.52 - struct list_head blkdev_list; 9.53 - spinlock_t blk_ring_lock; 9.54 - atomic_t refcnt; 9.55 + struct list_head blkdev_list; 9.56 + spinlock_t blk_ring_lock; 9.57 + atomic_t refcnt; 9.58 9.59 - struct work_struct free_work; 9.60 + struct work_struct free_work; 9.61 9.62 - u16 shmem_handle; 9.63 - grant_ref_t shmem_ref; 9.64 + u16 shmem_handle; 9.65 + grant_ref_t shmem_ref; 9.66 } blkif_t; 9.67 9.68 blkif_t *alloc_blkif(domid_t domid); 9.69 @@ -71,11 +71,11 @@ void free_blkif_callback(blkif_t *blkif) 9.70 int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn); 9.71 9.72 #define blkif_get(_b) (atomic_inc(&(_b)->refcnt)) 9.73 -#define blkif_put(_b) \ 9.74 - do { \ 9.75 - if ( atomic_dec_and_test(&(_b)->refcnt) ) \ 9.76 - free_blkif_callback(_b); \ 9.77 - } while (0) 9.78 +#define blkif_put(_b) \ 9.79 + do { \ 9.80 + if (atomic_dec_and_test(&(_b)->refcnt)) \ 9.81 + free_blkif_callback(_b); \ 9.82 + } while (0) 9.83 9.84 /* Create a vbd. 
*/ 9.85 int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, u32 pdevice, 9.86 @@ -87,10 +87,10 @@ unsigned int vbd_info(struct vbd *vbd); 9.87 unsigned long vbd_secsize(struct vbd *vbd); 9.88 9.89 struct phys_req { 9.90 - unsigned short dev; 9.91 - unsigned short nr_sects; 9.92 - struct block_device *bdev; 9.93 - blkif_sector_t sector_number; 9.94 + unsigned short dev; 9.95 + unsigned short nr_sects; 9.96 + struct block_device *bdev; 9.97 + blkif_sector_t sector_number; 9.98 }; 9.99 9.100 int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 9.101 @@ -104,3 +104,13 @@ void blkif_xenbus_init(void); 9.102 irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs); 9.103 9.104 #endif /* __BLKIF__BACKEND__COMMON_H__ */ 9.105 + 9.106 +/* 9.107 + * Local variables: 9.108 + * c-file-style: "linux" 9.109 + * indent-tabs-mode: t 9.110 + * c-indent-level: 8 9.111 + * c-basic-offset: 8 9.112 + * tab-width: 8 9.113 + * End: 9.114 + */
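The blkif_get()/blkif_put() macros above implement a plain atomic reference count: the count starts at 1 in alloc_blkif(), and the put that drops it to zero calls free_blkif_callback(), which defers the actual teardown to a workqueue. A hedged usage sketch, assuming only the declarations in this header; example_hold_blkif() and hand_off_to_worker() are hypothetical:

#include <linux/err.h>
#include "common.h"

extern void hand_off_to_worker(blkif_t *blkif);	/* hypothetical consumer */

static void example_hold_blkif(domid_t domid)
{
	blkif_t *blkif = alloc_blkif(domid);	/* refcnt == 1 on success */

	if (IS_ERR(blkif))
		return;

	blkif_get(blkif);		/* extra ref while another party uses it */
	hand_off_to_worker(blkif);
	blkif_put(blkif);		/* balances the get; whoever drops the
					 * last reference triggers
					 * free_blkif_callback() */
}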
10.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c Thu Sep 22 16:05:44 2005 +0100 10.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c Thu Sep 22 16:12:14 2005 +0100 10.3 @@ -13,134 +13,144 @@ static kmem_cache_t *blkif_cachep; 10.4 10.5 blkif_t *alloc_blkif(domid_t domid) 10.6 { 10.7 - blkif_t *blkif; 10.8 + blkif_t *blkif; 10.9 10.10 - blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL); 10.11 - if (!blkif) 10.12 - return ERR_PTR(-ENOMEM); 10.13 + blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL); 10.14 + if (!blkif) 10.15 + return ERR_PTR(-ENOMEM); 10.16 10.17 - memset(blkif, 0, sizeof(*blkif)); 10.18 - blkif->domid = domid; 10.19 - blkif->status = DISCONNECTED; 10.20 - spin_lock_init(&blkif->blk_ring_lock); 10.21 - atomic_set(&blkif->refcnt, 1); 10.22 + memset(blkif, 0, sizeof(*blkif)); 10.23 + blkif->domid = domid; 10.24 + blkif->status = DISCONNECTED; 10.25 + spin_lock_init(&blkif->blk_ring_lock); 10.26 + atomic_set(&blkif->refcnt, 1); 10.27 10.28 - return blkif; 10.29 + return blkif; 10.30 } 10.31 10.32 static int map_frontend_page(blkif_t *blkif, unsigned long shared_page) 10.33 { 10.34 - struct gnttab_map_grant_ref op; 10.35 + struct gnttab_map_grant_ref op; 10.36 10.37 - op.host_addr = (unsigned long)blkif->blk_ring_area->addr; 10.38 - op.flags = GNTMAP_host_map; 10.39 - op.ref = shared_page; 10.40 - op.dom = blkif->domid; 10.41 + op.host_addr = (unsigned long)blkif->blk_ring_area->addr; 10.42 + op.flags = GNTMAP_host_map; 10.43 + op.ref = shared_page; 10.44 + op.dom = blkif->domid; 10.45 10.46 - lock_vm_area(blkif->blk_ring_area); 10.47 - BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)); 10.48 - unlock_vm_area(blkif->blk_ring_area); 10.49 + lock_vm_area(blkif->blk_ring_area); 10.50 + BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)); 10.51 + unlock_vm_area(blkif->blk_ring_area); 10.52 10.53 - if (op.handle < 0) { 10.54 - DPRINTK(" Grant table operation failure !\n"); 10.55 - return op.handle; 10.56 - } 10.57 + if (op.handle < 0) { 10.58 + DPRINTK(" Grant table operation failure !\n"); 10.59 + return op.handle; 10.60 + } 10.61 10.62 - blkif->shmem_ref = shared_page; 10.63 - blkif->shmem_handle = op.handle; 10.64 + blkif->shmem_ref = shared_page; 10.65 + blkif->shmem_handle = op.handle; 10.66 10.67 - return 0; 10.68 + return 0; 10.69 } 10.70 10.71 static void unmap_frontend_page(blkif_t *blkif) 10.72 { 10.73 - struct gnttab_unmap_grant_ref op; 10.74 + struct gnttab_unmap_grant_ref op; 10.75 10.76 - op.host_addr = (unsigned long)blkif->blk_ring_area->addr; 10.77 - op.handle = blkif->shmem_handle; 10.78 - op.dev_bus_addr = 0; 10.79 + op.host_addr = (unsigned long)blkif->blk_ring_area->addr; 10.80 + op.handle = blkif->shmem_handle; 10.81 + op.dev_bus_addr = 0; 10.82 10.83 - lock_vm_area(blkif->blk_ring_area); 10.84 - BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)); 10.85 - unlock_vm_area(blkif->blk_ring_area); 10.86 + lock_vm_area(blkif->blk_ring_area); 10.87 + BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)); 10.88 + unlock_vm_area(blkif->blk_ring_area); 10.89 } 10.90 10.91 int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn) 10.92 { 10.93 - blkif_sring_t *sring; 10.94 - evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain }; 10.95 - int err; 10.96 + blkif_sring_t *sring; 10.97 + evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain }; 10.98 + int err; 10.99 10.100 - BUG_ON(blkif->remote_evtchn); 10.101 + BUG_ON(blkif->remote_evtchn); 10.102 10.103 - if ( 
(blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL ) 10.104 - return -ENOMEM; 10.105 + if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL ) 10.106 + return -ENOMEM; 10.107 10.108 - err = map_frontend_page(blkif, shared_page); 10.109 - if (err) { 10.110 - free_vm_area(blkif->blk_ring_area); 10.111 - return err; 10.112 - } 10.113 + err = map_frontend_page(blkif, shared_page); 10.114 + if (err) { 10.115 + free_vm_area(blkif->blk_ring_area); 10.116 + return err; 10.117 + } 10.118 10.119 - op.u.bind_interdomain.dom1 = DOMID_SELF; 10.120 - op.u.bind_interdomain.dom2 = blkif->domid; 10.121 - op.u.bind_interdomain.port1 = 0; 10.122 - op.u.bind_interdomain.port2 = evtchn; 10.123 - err = HYPERVISOR_event_channel_op(&op); 10.124 - if (err) { 10.125 - unmap_frontend_page(blkif); 10.126 - free_vm_area(blkif->blk_ring_area); 10.127 - return err; 10.128 - } 10.129 + op.u.bind_interdomain.dom1 = DOMID_SELF; 10.130 + op.u.bind_interdomain.dom2 = blkif->domid; 10.131 + op.u.bind_interdomain.port1 = 0; 10.132 + op.u.bind_interdomain.port2 = evtchn; 10.133 + err = HYPERVISOR_event_channel_op(&op); 10.134 + if (err) { 10.135 + unmap_frontend_page(blkif); 10.136 + free_vm_area(blkif->blk_ring_area); 10.137 + return err; 10.138 + } 10.139 10.140 - blkif->evtchn = op.u.bind_interdomain.port1; 10.141 - blkif->remote_evtchn = evtchn; 10.142 + blkif->evtchn = op.u.bind_interdomain.port1; 10.143 + blkif->remote_evtchn = evtchn; 10.144 10.145 - sring = (blkif_sring_t *)blkif->blk_ring_area->addr; 10.146 - SHARED_RING_INIT(sring); 10.147 - BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE); 10.148 + sring = (blkif_sring_t *)blkif->blk_ring_area->addr; 10.149 + SHARED_RING_INIT(sring); 10.150 + BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE); 10.151 10.152 - bind_evtchn_to_irqhandler(blkif->evtchn, blkif_be_int, 0, "blkif-backend", 10.153 - blkif); 10.154 - blkif->status = CONNECTED; 10.155 + bind_evtchn_to_irqhandler( 10.156 + blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif); 10.157 + blkif->status = CONNECTED; 10.158 10.159 - return 0; 10.160 + return 0; 10.161 } 10.162 10.163 static void free_blkif(void *arg) 10.164 { 10.165 - evtchn_op_t op = { .cmd = EVTCHNOP_close }; 10.166 - blkif_t *blkif = (blkif_t *)arg; 10.167 + evtchn_op_t op = { .cmd = EVTCHNOP_close }; 10.168 + blkif_t *blkif = (blkif_t *)arg; 10.169 10.170 - op.u.close.port = blkif->evtchn; 10.171 - op.u.close.dom = DOMID_SELF; 10.172 - HYPERVISOR_event_channel_op(&op); 10.173 - op.u.close.port = blkif->remote_evtchn; 10.174 - op.u.close.dom = blkif->domid; 10.175 - HYPERVISOR_event_channel_op(&op); 10.176 + op.u.close.port = blkif->evtchn; 10.177 + op.u.close.dom = DOMID_SELF; 10.178 + HYPERVISOR_event_channel_op(&op); 10.179 + op.u.close.port = blkif->remote_evtchn; 10.180 + op.u.close.dom = blkif->domid; 10.181 + HYPERVISOR_event_channel_op(&op); 10.182 10.183 - vbd_free(&blkif->vbd); 10.184 + vbd_free(&blkif->vbd); 10.185 10.186 - if (blkif->evtchn) 10.187 - unbind_evtchn_from_irqhandler(blkif->evtchn, blkif); 10.188 + if (blkif->evtchn) 10.189 + unbind_evtchn_from_irqhandler(blkif->evtchn, blkif); 10.190 10.191 - if (blkif->blk_ring.sring) { 10.192 - unmap_frontend_page(blkif); 10.193 - free_vm_area(blkif->blk_ring_area); 10.194 - blkif->blk_ring.sring = NULL; 10.195 - } 10.196 + if (blkif->blk_ring.sring) { 10.197 + unmap_frontend_page(blkif); 10.198 + free_vm_area(blkif->blk_ring_area); 10.199 + blkif->blk_ring.sring = NULL; 10.200 + } 10.201 10.202 - kmem_cache_free(blkif_cachep, blkif); 10.203 + 
kmem_cache_free(blkif_cachep, blkif); 10.204 } 10.205 10.206 void free_blkif_callback(blkif_t *blkif) 10.207 { 10.208 - INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif); 10.209 - schedule_work(&blkif->free_work); 10.210 + INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif); 10.211 + schedule_work(&blkif->free_work); 10.212 } 10.213 10.214 void __init blkif_interface_init(void) 10.215 { 10.216 - blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 10.217 - 0, 0, NULL, NULL); 10.218 + blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 10.219 + 0, 0, NULL, NULL); 10.220 } 10.221 + 10.222 +/* 10.223 + * Local variables: 10.224 + * c-file-style: "linux" 10.225 + * indent-tabs-mode: t 10.226 + * c-indent-level: 8 10.227 + * c-basic-offset: 8 10.228 + * tab-width: 8 10.229 + * End: 10.230 + */
11.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c Thu Sep 22 16:05:44 2005 +0100 11.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/vbd.c Thu Sep 22 16:12:14 2005 +0100 11.3 @@ -11,10 +11,10 @@ 11.4 11.5 static inline dev_t vbd_map_devnum(u32 cookie) 11.6 { 11.7 - return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie)); 11.8 + return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie)); 11.9 } 11.10 -#define vbd_sz(_v) ((_v)->bdev->bd_part ? \ 11.11 - (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity) 11.12 +#define vbd_sz(_v) ((_v)->bdev->bd_part ? \ 11.13 + (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity) 11.14 #define bdev_put(_b) blkdev_put(_b) 11.15 11.16 unsigned long vbd_size(struct vbd *vbd) 11.17 @@ -35,63 +35,73 @@ unsigned long vbd_secsize(struct vbd *vb 11.18 int vbd_create(blkif_t *blkif, blkif_vdev_t handle, 11.19 u32 pdevice, int readonly) 11.20 { 11.21 - struct vbd *vbd; 11.22 + struct vbd *vbd; 11.23 11.24 - vbd = &blkif->vbd; 11.25 - vbd->handle = handle; 11.26 - vbd->readonly = readonly; 11.27 - vbd->type = 0; 11.28 + vbd = &blkif->vbd; 11.29 + vbd->handle = handle; 11.30 + vbd->readonly = readonly; 11.31 + vbd->type = 0; 11.32 11.33 - vbd->pdevice = pdevice; 11.34 + vbd->pdevice = pdevice; 11.35 11.36 - vbd->bdev = open_by_devnum( 11.37 - vbd_map_devnum(vbd->pdevice), 11.38 - vbd->readonly ? FMODE_READ : FMODE_WRITE); 11.39 - if ( IS_ERR(vbd->bdev) ) 11.40 - { 11.41 - DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice); 11.42 - return -ENOENT; 11.43 - } 11.44 + vbd->bdev = open_by_devnum( 11.45 + vbd_map_devnum(vbd->pdevice), 11.46 + vbd->readonly ? FMODE_READ : FMODE_WRITE); 11.47 + if (IS_ERR(vbd->bdev)) { 11.48 + DPRINTK("vbd_creat: device %08x doesn't exist.\n", 11.49 + vbd->pdevice); 11.50 + return -ENOENT; 11.51 + } 11.52 11.53 - if ( (vbd->bdev->bd_disk == NULL) ) 11.54 - { 11.55 - DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice); 11.56 - vbd_free(vbd); 11.57 - return -ENOENT; 11.58 - } 11.59 + if (vbd->bdev->bd_disk == NULL) { 11.60 + DPRINTK("vbd_creat: device %08x doesn't exist.\n", 11.61 + vbd->pdevice); 11.62 + vbd_free(vbd); 11.63 + return -ENOENT; 11.64 + } 11.65 11.66 - if ( vbd->bdev->bd_disk->flags & GENHD_FL_CD ) 11.67 - vbd->type |= VDISK_CDROM; 11.68 - if ( vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE ) 11.69 - vbd->type |= VDISK_REMOVABLE; 11.70 + if (vbd->bdev->bd_disk->flags & GENHD_FL_CD) 11.71 + vbd->type |= VDISK_CDROM; 11.72 + if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE) 11.73 + vbd->type |= VDISK_REMOVABLE; 11.74 11.75 - DPRINTK("Successful creation of handle=%04x (dom=%u)\n", 11.76 - handle, blkif->domid); 11.77 - return 0; 11.78 + DPRINTK("Successful creation of handle=%04x (dom=%u)\n", 11.79 + handle, blkif->domid); 11.80 + return 0; 11.81 } 11.82 11.83 void vbd_free(struct vbd *vbd) 11.84 { 11.85 - if (vbd->bdev) 11.86 - bdev_put(vbd->bdev); 11.87 - vbd->bdev = NULL; 11.88 + if (vbd->bdev) 11.89 + bdev_put(vbd->bdev); 11.90 + vbd->bdev = NULL; 11.91 } 11.92 11.93 int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation) 11.94 { 11.95 - struct vbd *vbd = &blkif->vbd; 11.96 - int rc = -EACCES; 11.97 + struct vbd *vbd = &blkif->vbd; 11.98 + int rc = -EACCES; 11.99 11.100 - if ((operation == WRITE) && vbd->readonly) 11.101 - goto out; 11.102 + if ((operation == WRITE) && vbd->readonly) 11.103 + goto out; 11.104 11.105 - if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd))) 11.106 - goto out; 11.107 + if (unlikely((req->sector_number + req->nr_sects) > 
vbd_sz(vbd))) 11.108 + goto out; 11.109 11.110 - req->dev = vbd->pdevice; 11.111 - req->bdev = vbd->bdev; 11.112 - rc = 0; 11.113 + req->dev = vbd->pdevice; 11.114 + req->bdev = vbd->bdev; 11.115 + rc = 0; 11.116 11.117 out: 11.118 - return rc; 11.119 + return rc; 11.120 } 11.121 + 11.122 +/* 11.123 + * Local variables: 11.124 + * c-file-style: "linux" 11.125 + * indent-tabs-mode: t 11.126 + * c-indent-level: 8 11.127 + * c-basic-offset: 8 11.128 + * tab-width: 8 11.129 + * End: 11.130 + */
12.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Thu Sep 22 16:05:44 2005 +0100 12.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c Thu Sep 22 16:12:14 2005 +0100 12.3 @@ -124,7 +124,7 @@ static void frontend_changed(struct xenb 12.4 12.5 return; 12.6 12.7 -abort: 12.8 + abort: 12.9 xenbus_transaction_end(1); 12.10 } 12.11 12.12 @@ -278,3 +278,13 @@ void blkif_xenbus_init(void) 12.13 { 12.14 xenbus_register_backend(&blkback); 12.15 } 12.16 + 12.17 +/* 12.18 + * Local variables: 12.19 + * c-file-style: "linux" 12.20 + * indent-tabs-mode: t 12.21 + * c-indent-level: 8 12.22 + * c-basic-offset: 8 12.23 + * tab-width: 8 12.24 + * End: 12.25 + */
13.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Thu Sep 22 16:05:44 2005 +0100 13.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Thu Sep 22 16:12:14 2005 +0100 13.3 @@ -146,4 +146,15 @@ extern void do_blkif_request (request_qu 13.4 int xlvbd_add(blkif_sector_t capacity, int device, 13.5 u16 vdisk_info, u16 sector_size, struct blkfront_info *info); 13.6 void xlvbd_del(struct blkfront_info *info); 13.7 + 13.8 #endif /* __XEN_DRIVERS_BLOCK_H__ */ 13.9 + 13.10 +/* 13.11 + * Local variables: 13.12 + * c-file-style: "linux" 13.13 + * indent-tabs-mode: t 13.14 + * c-indent-level: 8 13.15 + * c-basic-offset: 8 13.16 + * tab-width: 8 13.17 + * End: 13.18 + */
14.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c Thu Sep 22 16:05:44 2005 +0100 14.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c Thu Sep 22 16:12:14 2005 +0100 14.3 @@ -65,7 +65,7 @@ static struct xlbd_type_info xlbd_vbd_ty 14.4 }; 14.5 14.6 static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS + 14.7 - NUM_VBD_MAJORS]; 14.8 + NUM_VBD_MAJORS]; 14.9 14.10 #define XLBD_MAJOR_IDE_START 0 14.11 #define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS) 14.12 @@ -309,3 +309,13 @@ xlvbd_del(struct blkfront_info *info) 14.13 14.14 bdput(bd); 14.15 } 14.16 + 14.17 +/* 14.18 + * Local variables: 14.19 + * c-file-style: "linux" 14.20 + * indent-tabs-mode: t 14.21 + * c-indent-level: 8 14.22 + * c-basic-offset: 8 14.23 + * tab-width: 8 14.24 + * End: 14.25 + */
15.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c Thu Sep 22 16:05:44 2005 +0100 15.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c Thu Sep 22 16:12:14 2005 +0100 15.3 @@ -4,7 +4,6 @@ 15.4 * This is a modified version of the block backend driver that remaps requests 15.5 * to a user-space memory region. It is intended to be used to write 15.6 * application-level servers that provide block interfaces to client VMs. 15.7 - * 15.8 */ 15.9 15.10 #include <linux/kernel.h> 15.11 @@ -67,20 +66,19 @@ static int blktap_read_ufe_ring(void); 15.12 15.13 static inline int BLKTAP_MODE_VALID(unsigned long arg) 15.14 { 15.15 - return ( 15.16 - ( arg == BLKTAP_MODE_PASSTHROUGH ) || 15.17 - ( arg == BLKTAP_MODE_INTERCEPT_FE ) || 15.18 - ( arg == BLKTAP_MODE_INTERPOSE ) ); 15.19 + return ((arg == BLKTAP_MODE_PASSTHROUGH ) || 15.20 + (arg == BLKTAP_MODE_INTERCEPT_FE) || 15.21 + (arg == BLKTAP_MODE_INTERPOSE )); 15.22 /* 15.23 - return ( 15.24 - ( arg == BLKTAP_MODE_PASSTHROUGH ) || 15.25 - ( arg == BLKTAP_MODE_INTERCEPT_FE ) || 15.26 - ( arg == BLKTAP_MODE_INTERCEPT_BE ) || 15.27 - ( arg == BLKTAP_MODE_INTERPOSE ) || 15.28 - ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) || 15.29 - ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) || 15.30 - ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH ) 15.31 - ); 15.32 + return ( 15.33 + ( arg == BLKTAP_MODE_PASSTHROUGH ) || 15.34 + ( arg == BLKTAP_MODE_INTERCEPT_FE ) || 15.35 + ( arg == BLKTAP_MODE_INTERCEPT_BE ) || 15.36 + ( arg == BLKTAP_MODE_INTERPOSE ) || 15.37 + ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) || 15.38 + ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) || 15.39 + ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH ) 15.40 + ); 15.41 */ 15.42 } 15.43 15.44 @@ -110,14 +108,12 @@ unsigned long mmap_vstart; /* Kernel pa 15.45 unsigned long rings_vstart; /* start of mmaped vma */ 15.46 unsigned long user_vstart; /* start of user mappings */ 15.47 15.48 -#define MMAP_PAGES \ 15.49 - (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST) 15.50 -#define MMAP_VADDR(_start, _req,_seg) \ 15.51 - (_start + \ 15.52 - ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \ 15.53 - ((_seg) * PAGE_SIZE)) 15.54 - 15.55 - 15.56 +#define MMAP_PAGES \ 15.57 + (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST) 15.58 +#define MMAP_VADDR(_start, _req,_seg) \ 15.59 + (_start + \ 15.60 + ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \ 15.61 + ((_seg) * PAGE_SIZE)) 15.62 15.63 /* 15.64 * Each outstanding request that we've passed to the lower device layers has a 15.65 @@ -126,12 +122,12 @@ unsigned long user_vstart; /* start of 15.66 * response queued for it, with the saved 'id' passed back. 
15.67 */ 15.68 typedef struct { 15.69 - blkif_t *blkif; 15.70 - unsigned long id; 15.71 - int nr_pages; 15.72 - atomic_t pendcnt; 15.73 - unsigned short operation; 15.74 - int status; 15.75 + blkif_t *blkif; 15.76 + unsigned long id; 15.77 + int nr_pages; 15.78 + atomic_t pendcnt; 15.79 + unsigned short operation; 15.80 + int status; 15.81 } pending_req_t; 15.82 15.83 /* 15.84 @@ -156,17 +152,17 @@ static PEND_RING_IDX pending_prod, pendi 15.85 15.86 static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx) 15.87 { 15.88 - return ( (fe_dom << 16) | MASK_PEND_IDX(idx) ); 15.89 + return ((fe_dom << 16) | MASK_PEND_IDX(idx)); 15.90 } 15.91 15.92 extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id) 15.93 { 15.94 - return (PEND_RING_IDX)( id & 0x0000ffff ); 15.95 + return (PEND_RING_IDX)(id & 0x0000ffff); 15.96 } 15.97 15.98 extern inline domid_t ID_TO_DOM(unsigned long id) 15.99 { 15.100 - return (domid_t)(id >> 16); 15.101 + return (domid_t)(id >> 16); 15.102 } 15.103 15.104 15.105 @@ -181,8 +177,8 @@ extern inline domid_t ID_TO_DOM(unsigned 15.106 */ 15.107 struct grant_handle_pair 15.108 { 15.109 - u16 kernel; 15.110 - u16 user; 15.111 + u16 kernel; 15.112 + u16 user; 15.113 }; 15.114 static struct grant_handle_pair pending_grant_handles[MMAP_PAGES]; 15.115 #define pending_handle(_idx, _i) \ 15.116 @@ -199,21 +195,20 @@ static struct grant_handle_pair pending_ 15.117 */ 15.118 15.119 static struct page *blktap_nopage(struct vm_area_struct *vma, 15.120 - unsigned long address, 15.121 - int *type) 15.122 + unsigned long address, 15.123 + int *type) 15.124 { 15.125 - /* 15.126 - * if the page has not been mapped in by the driver then generate 15.127 - * a SIGBUS to the domain. 15.128 - */ 15.129 + /* 15.130 + * if the page has not been mapped in by the driver then generate 15.131 + * a SIGBUS to the domain. 15.132 + */ 15.133 + force_sig(SIGBUS, current); 15.134 15.135 - force_sig(SIGBUS, current); 15.136 - 15.137 - return 0; 15.138 + return 0; 15.139 } 15.140 15.141 struct vm_operations_struct blktap_vm_ops = { 15.142 - nopage: blktap_nopage, 15.143 + nopage: blktap_nopage, 15.144 }; 15.145 15.146 /****************************************************************** 15.147 @@ -222,44 +217,45 @@ struct vm_operations_struct blktap_vm_op 15.148 15.149 static int blktap_open(struct inode *inode, struct file *filp) 15.150 { 15.151 - blkif_sring_t *sring; 15.152 - 15.153 - if ( test_and_set_bit(0, &blktap_dev_inuse) ) 15.154 - return -EBUSY; 15.155 + blkif_sring_t *sring; 15.156 + 15.157 + if (test_and_set_bit(0, &blktap_dev_inuse)) 15.158 + return -EBUSY; 15.159 15.160 - /* Allocate the fe ring. */ 15.161 - sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL); 15.162 - if (sring == NULL) 15.163 - goto fail_nomem; 15.164 + /* Allocate the fe ring. 
*/ 15.165 + sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL); 15.166 + if (sring == NULL) 15.167 + goto fail_nomem; 15.168 15.169 - SetPageReserved(virt_to_page(sring)); 15.170 + SetPageReserved(virt_to_page(sring)); 15.171 15.172 - SHARED_RING_INIT(sring); 15.173 - FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE); 15.174 + SHARED_RING_INIT(sring); 15.175 + FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE); 15.176 15.177 - return 0; 15.178 + return 0; 15.179 15.180 fail_nomem: 15.181 - return -ENOMEM; 15.182 + return -ENOMEM; 15.183 } 15.184 15.185 static int blktap_release(struct inode *inode, struct file *filp) 15.186 { 15.187 - blktap_dev_inuse = 0; 15.188 - blktap_ring_ok = 0; 15.189 + blktap_dev_inuse = 0; 15.190 + blktap_ring_ok = 0; 15.191 15.192 - /* Free the ring page. */ 15.193 - ClearPageReserved(virt_to_page(blktap_ufe_ring.sring)); 15.194 - free_page((unsigned long) blktap_ufe_ring.sring); 15.195 + /* Free the ring page. */ 15.196 + ClearPageReserved(virt_to_page(blktap_ufe_ring.sring)); 15.197 + free_page((unsigned long) blktap_ufe_ring.sring); 15.198 15.199 - /* Clear any active mappings and free foreign map table */ 15.200 - if (blktap_vma != NULL) { 15.201 - zap_page_range(blktap_vma, blktap_vma->vm_start, 15.202 - blktap_vma->vm_end - blktap_vma->vm_start, NULL); 15.203 - blktap_vma = NULL; 15.204 - } 15.205 + /* Clear any active mappings and free foreign map table */ 15.206 + if (blktap_vma != NULL) { 15.207 + zap_page_range( 15.208 + blktap_vma, blktap_vma->vm_start, 15.209 + blktap_vma->vm_end - blktap_vma->vm_start, NULL); 15.210 + blktap_vma = NULL; 15.211 + } 15.212 15.213 - return 0; 15.214 + return 0; 15.215 } 15.216 15.217 15.218 @@ -283,128 +279,124 @@ static int blktap_release(struct inode * 15.219 */ 15.220 static int blktap_mmap(struct file *filp, struct vm_area_struct *vma) 15.221 { 15.222 - int size; 15.223 - struct page **map; 15.224 - int i; 15.225 + int size; 15.226 + struct page **map; 15.227 + int i; 15.228 15.229 - DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n", 15.230 - vma->vm_start, vma->vm_end); 15.231 + DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n", 15.232 + vma->vm_start, vma->vm_end); 15.233 15.234 - vma->vm_flags |= VM_RESERVED; 15.235 - vma->vm_ops = &blktap_vm_ops; 15.236 + vma->vm_flags |= VM_RESERVED; 15.237 + vma->vm_ops = &blktap_vm_ops; 15.238 15.239 - size = vma->vm_end - vma->vm_start; 15.240 - if ( size != ( (MMAP_PAGES + RING_PAGES) << PAGE_SHIFT ) ) { 15.241 - printk(KERN_INFO 15.242 - "blktap: you _must_ map exactly %d pages!\n", 15.243 - MMAP_PAGES + RING_PAGES); 15.244 - return -EAGAIN; 15.245 - } 15.246 + size = vma->vm_end - vma->vm_start; 15.247 + if (size != ((MMAP_PAGES + RING_PAGES) << PAGE_SHIFT)) { 15.248 + printk(KERN_INFO 15.249 + "blktap: you _must_ map exactly %d pages!\n", 15.250 + MMAP_PAGES + RING_PAGES); 15.251 + return -EAGAIN; 15.252 + } 15.253 15.254 - size >>= PAGE_SHIFT; 15.255 - DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1); 15.256 + size >>= PAGE_SHIFT; 15.257 + DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1); 15.258 15.259 - rings_vstart = vma->vm_start; 15.260 - user_vstart = rings_vstart + (RING_PAGES << PAGE_SHIFT); 15.261 + rings_vstart = vma->vm_start; 15.262 + user_vstart = rings_vstart + (RING_PAGES << PAGE_SHIFT); 15.263 15.264 - /* Map the ring pages to the start of the region and reserve it. */ 15.265 + /* Map the ring pages to the start of the region and reserve it. */ 15.266 15.267 - /* not sure if I really need to do this... 
*/ 15.268 - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 15.269 + /* not sure if I really need to do this... */ 15.270 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 15.271 15.272 - if (remap_pfn_range(vma, vma->vm_start, 15.273 - __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 15.274 - PAGE_SIZE, vma->vm_page_prot)) 15.275 - { 15.276 - WPRINTK("Mapping user ring failed!\n"); 15.277 - goto fail; 15.278 - } 15.279 + if (remap_pfn_range(vma, vma->vm_start, 15.280 + __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 15.281 + PAGE_SIZE, vma->vm_page_prot)) { 15.282 + WPRINTK("Mapping user ring failed!\n"); 15.283 + goto fail; 15.284 + } 15.285 + 15.286 + /* Mark this VM as containing foreign pages, and set up mappings. */ 15.287 + map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) 15.288 + * sizeof(struct page_struct*), 15.289 + GFP_KERNEL); 15.290 + if (map == NULL) { 15.291 + WPRINTK("Couldn't alloc VM_FOREIGH map.\n"); 15.292 + goto fail; 15.293 + } 15.294 15.295 - /* Mark this VM as containing foreign pages, and set up mappings. */ 15.296 - map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) 15.297 - * sizeof(struct page_struct*), 15.298 - GFP_KERNEL); 15.299 - if (map == NULL) 15.300 - { 15.301 - WPRINTK("Couldn't alloc VM_FOREIGH map.\n"); 15.302 - goto fail; 15.303 - } 15.304 - 15.305 - for (i=0; i<((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++) 15.306 - map[i] = NULL; 15.307 + for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++) 15.308 + map[i] = NULL; 15.309 15.310 - vma->vm_private_data = map; 15.311 - vma->vm_flags |= VM_FOREIGN; 15.312 + vma->vm_private_data = map; 15.313 + vma->vm_flags |= VM_FOREIGN; 15.314 15.315 - blktap_vma = vma; 15.316 - blktap_ring_ok = 1; 15.317 + blktap_vma = vma; 15.318 + blktap_ring_ok = 1; 15.319 15.320 - return 0; 15.321 + return 0; 15.322 fail: 15.323 - /* Clear any active mappings. */ 15.324 - zap_page_range(vma, vma->vm_start, 15.325 - vma->vm_end - vma->vm_start, NULL); 15.326 + /* Clear any active mappings. */ 15.327 + zap_page_range(vma, vma->vm_start, 15.328 + vma->vm_end - vma->vm_start, NULL); 15.329 15.330 - return -ENOMEM; 15.331 + return -ENOMEM; 15.332 } 15.333 15.334 static int blktap_ioctl(struct inode *inode, struct file *filp, 15.335 unsigned int cmd, unsigned long arg) 15.336 { 15.337 - switch(cmd) { 15.338 - case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */ 15.339 - return blktap_read_ufe_ring(); 15.340 + switch(cmd) { 15.341 + case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */ 15.342 + return blktap_read_ufe_ring(); 15.343 15.344 - case BLKTAP_IOCTL_SETMODE: 15.345 - if (BLKTAP_MODE_VALID(arg)) { 15.346 - blktap_mode = arg; 15.347 - /* XXX: may need to flush rings here. */ 15.348 - printk(KERN_INFO "blktap: set mode to %lx\n", arg); 15.349 - return 0; 15.350 - } 15.351 - case BLKTAP_IOCTL_PRINT_IDXS: 15.352 + case BLKTAP_IOCTL_SETMODE: 15.353 + if (BLKTAP_MODE_VALID(arg)) { 15.354 + blktap_mode = arg; 15.355 + /* XXX: may need to flush rings here. 
*/ 15.356 + printk(KERN_INFO "blktap: set mode to %lx\n", arg); 15.357 + return 0; 15.358 + } 15.359 + case BLKTAP_IOCTL_PRINT_IDXS: 15.360 { 15.361 - //print_fe_ring_idxs(); 15.362 - WPRINTK("User Rings: \n-----------\n"); 15.363 - WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d " 15.364 - "| req_prod: %2d, rsp_prod: %2d\n", 15.365 - blktap_ufe_ring.rsp_cons, 15.366 - blktap_ufe_ring.req_prod_pvt, 15.367 - blktap_ufe_ring.sring->req_prod, 15.368 - blktap_ufe_ring.sring->rsp_prod); 15.369 + //print_fe_ring_idxs(); 15.370 + WPRINTK("User Rings: \n-----------\n"); 15.371 + WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d " 15.372 + "| req_prod: %2d, rsp_prod: %2d\n", 15.373 + blktap_ufe_ring.rsp_cons, 15.374 + blktap_ufe_ring.req_prod_pvt, 15.375 + blktap_ufe_ring.sring->req_prod, 15.376 + blktap_ufe_ring.sring->rsp_prod); 15.377 15.378 } 15.379 - } 15.380 - return -ENOIOCTLCMD; 15.381 + } 15.382 + return -ENOIOCTLCMD; 15.383 } 15.384 15.385 static unsigned int blktap_poll(struct file *file, poll_table *wait) 15.386 { 15.387 - poll_wait(file, &blktap_wait, wait); 15.388 - if ( RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring) ) 15.389 - { 15.390 - flush_tlb_all(); 15.391 + poll_wait(file, &blktap_wait, wait); 15.392 + if (RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring)) { 15.393 + flush_tlb_all(); 15.394 + RING_PUSH_REQUESTS(&blktap_ufe_ring); 15.395 + return POLLIN | POLLRDNORM; 15.396 + } 15.397 15.398 - RING_PUSH_REQUESTS(&blktap_ufe_ring); 15.399 - return POLLIN | POLLRDNORM; 15.400 - } 15.401 - 15.402 - return 0; 15.403 + return 0; 15.404 } 15.405 15.406 void blktap_kick_user(void) 15.407 { 15.408 - /* blktap_ring->req_prod = blktap_req_prod; */ 15.409 - wake_up_interruptible(&blktap_wait); 15.410 + /* blktap_ring->req_prod = blktap_req_prod; */ 15.411 + wake_up_interruptible(&blktap_wait); 15.412 } 15.413 15.414 static struct file_operations blktap_fops = { 15.415 - owner: THIS_MODULE, 15.416 - poll: blktap_poll, 15.417 - ioctl: blktap_ioctl, 15.418 - open: blktap_open, 15.419 - release: blktap_release, 15.420 - mmap: blktap_mmap, 15.421 + owner: THIS_MODULE, 15.422 + poll: blktap_poll, 15.423 + ioctl: blktap_ioctl, 15.424 + open: blktap_open, 15.425 + release: blktap_release, 15.426 + mmap: blktap_mmap, 15.427 }; 15.428 15.429 15.430 @@ -417,44 +409,44 @@ static void make_response(blkif_t *blkif 15.431 15.432 static void fast_flush_area(int idx, int nr_pages) 15.433 { 15.434 - struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2]; 15.435 - unsigned int i, op = 0; 15.436 - struct grant_handle_pair *handle; 15.437 - unsigned long ptep; 15.438 + struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2]; 15.439 + unsigned int i, op = 0; 15.440 + struct grant_handle_pair *handle; 15.441 + unsigned long ptep; 15.442 15.443 - for (i=0; i<nr_pages; i++) 15.444 - { 15.445 - handle = &pending_handle(idx, i); 15.446 - if (!BLKTAP_INVALID_HANDLE(handle)) 15.447 - { 15.448 + for ( i = 0; i < nr_pages; i++) 15.449 + { 15.450 + handle = &pending_handle(idx, i); 15.451 + if (BLKTAP_INVALID_HANDLE(handle)) 15.452 + continue; 15.453 15.454 - unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i); 15.455 - unmap[op].dev_bus_addr = 0; 15.456 - unmap[op].handle = handle->kernel; 15.457 - op++; 15.458 + unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i); 15.459 + unmap[op].dev_bus_addr = 0; 15.460 + unmap[op].handle = handle->kernel; 15.461 + op++; 15.462 15.463 - if (create_lookup_pte_addr(blktap_vma->vm_mm, 15.464 - MMAP_VADDR(user_vstart, idx, i), 15.465 - &ptep) !=0) { 15.466 - 
DPRINTK("Couldn't get a pte addr!\n"); 15.467 - return; 15.468 - } 15.469 - unmap[op].host_addr = ptep; 15.470 - unmap[op].dev_bus_addr = 0; 15.471 - unmap[op].handle = handle->user; 15.472 - op++; 15.473 + if (create_lookup_pte_addr( 15.474 + blktap_vma->vm_mm, 15.475 + MMAP_VADDR(user_vstart, idx, i), 15.476 + &ptep) !=0) { 15.477 + DPRINTK("Couldn't get a pte addr!\n"); 15.478 + return; 15.479 + } 15.480 + unmap[op].host_addr = ptep; 15.481 + unmap[op].dev_bus_addr = 0; 15.482 + unmap[op].handle = handle->user; 15.483 + op++; 15.484 15.485 - BLKTAP_INVALIDATE_HANDLE(handle); 15.486 - } 15.487 - } 15.488 - if ( unlikely(HYPERVISOR_grant_table_op( 15.489 - GNTTABOP_unmap_grant_ref, unmap, op))) 15.490 - BUG(); 15.491 + BLKTAP_INVALIDATE_HANDLE(handle); 15.492 + } 15.493 15.494 - if (blktap_vma != NULL) 15.495 - zap_page_range(blktap_vma, 15.496 - MMAP_VADDR(user_vstart, idx, 0), 15.497 - nr_pages << PAGE_SHIFT, NULL); 15.498 + BUG_ON(HYPERVISOR_grant_table_op( 15.499 + GNTTABOP_unmap_grant_ref, unmap, op)); 15.500 + 15.501 + if (blktap_vma != NULL) 15.502 + zap_page_range(blktap_vma, 15.503 + MMAP_VADDR(user_vstart, idx, 0), 15.504 + nr_pages << PAGE_SHIFT, NULL); 15.505 } 15.506 15.507 /****************************************************************** 15.508 @@ -466,34 +458,38 @@ static spinlock_t blkio_schedule_list_lo 15.509 15.510 static int __on_blkdev_list(blkif_t *blkif) 15.511 { 15.512 - return blkif->blkdev_list.next != NULL; 15.513 + return blkif->blkdev_list.next != NULL; 15.514 } 15.515 15.516 static void remove_from_blkdev_list(blkif_t *blkif) 15.517 { 15.518 - unsigned long flags; 15.519 - if ( !__on_blkdev_list(blkif) ) return; 15.520 - spin_lock_irqsave(&blkio_schedule_list_lock, flags); 15.521 - if ( __on_blkdev_list(blkif) ) 15.522 - { 15.523 - list_del(&blkif->blkdev_list); 15.524 - blkif->blkdev_list.next = NULL; 15.525 - blkif_put(blkif); 15.526 - } 15.527 - spin_unlock_irqrestore(&blkio_schedule_list_lock, flags); 15.528 + unsigned long flags; 15.529 + 15.530 + if (!__on_blkdev_list(blkif)) 15.531 + return; 15.532 + 15.533 + spin_lock_irqsave(&blkio_schedule_list_lock, flags); 15.534 + if (__on_blkdev_list(blkif)) { 15.535 + list_del(&blkif->blkdev_list); 15.536 + blkif->blkdev_list.next = NULL; 15.537 + blkif_put(blkif); 15.538 + } 15.539 + spin_unlock_irqrestore(&blkio_schedule_list_lock, flags); 15.540 } 15.541 15.542 static void add_to_blkdev_list_tail(blkif_t *blkif) 15.543 { 15.544 - unsigned long flags; 15.545 - if ( __on_blkdev_list(blkif) ) return; 15.546 - spin_lock_irqsave(&blkio_schedule_list_lock, flags); 15.547 - if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) ) 15.548 - { 15.549 - list_add_tail(&blkif->blkdev_list, &blkio_schedule_list); 15.550 - blkif_get(blkif); 15.551 - } 15.552 - spin_unlock_irqrestore(&blkio_schedule_list_lock, flags); 15.553 + unsigned long flags; 15.554 + 15.555 + if (__on_blkdev_list(blkif)) 15.556 + return; 15.557 + 15.558 + spin_lock_irqsave(&blkio_schedule_list_lock, flags); 15.559 + if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) { 15.560 + list_add_tail(&blkif->blkdev_list, &blkio_schedule_list); 15.561 + blkif_get(blkif); 15.562 + } 15.563 + spin_unlock_irqrestore(&blkio_schedule_list_lock, flags); 15.564 } 15.565 15.566 15.567 @@ -505,51 +501,50 @@ static DECLARE_WAIT_QUEUE_HEAD(blkio_sch 15.568 15.569 static int blkio_schedule(void *arg) 15.570 { 15.571 - DECLARE_WAITQUEUE(wq, current); 15.572 + DECLARE_WAITQUEUE(wq, current); 15.573 15.574 - blkif_t *blkif; 15.575 - struct list_head *ent; 
15.576 + blkif_t *blkif; 15.577 + struct list_head *ent; 15.578 15.579 - daemonize("xenblkd"); 15.580 + daemonize("xenblkd"); 15.581 15.582 - for ( ; ; ) 15.583 - { 15.584 - /* Wait for work to do. */ 15.585 - add_wait_queue(&blkio_schedule_wait, &wq); 15.586 - set_current_state(TASK_INTERRUPTIBLE); 15.587 - if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 15.588 - list_empty(&blkio_schedule_list) ) 15.589 - schedule(); 15.590 - __set_current_state(TASK_RUNNING); 15.591 - remove_wait_queue(&blkio_schedule_wait, &wq); 15.592 + for (;;) { 15.593 + /* Wait for work to do. */ 15.594 + add_wait_queue(&blkio_schedule_wait, &wq); 15.595 + set_current_state(TASK_INTERRUPTIBLE); 15.596 + if ((NR_PENDING_REQS == MAX_PENDING_REQS) || 15.597 + list_empty(&blkio_schedule_list)) 15.598 + schedule(); 15.599 + __set_current_state(TASK_RUNNING); 15.600 + remove_wait_queue(&blkio_schedule_wait, &wq); 15.601 15.602 - /* Queue up a batch of requests. */ 15.603 - while ( (NR_PENDING_REQS < MAX_PENDING_REQS) && 15.604 - !list_empty(&blkio_schedule_list) ) 15.605 - { 15.606 - ent = blkio_schedule_list.next; 15.607 - blkif = list_entry(ent, blkif_t, blkdev_list); 15.608 - blkif_get(blkif); 15.609 - remove_from_blkdev_list(blkif); 15.610 - if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) ) 15.611 - add_to_blkdev_list_tail(blkif); 15.612 - blkif_put(blkif); 15.613 - } 15.614 - } 15.615 + /* Queue up a batch of requests. */ 15.616 + while ((NR_PENDING_REQS < MAX_PENDING_REQS) && 15.617 + !list_empty(&blkio_schedule_list)) { 15.618 + ent = blkio_schedule_list.next; 15.619 + blkif = list_entry(ent, blkif_t, blkdev_list); 15.620 + blkif_get(blkif); 15.621 + remove_from_blkdev_list(blkif); 15.622 + if (do_block_io_op(blkif, BATCH_PER_DOMAIN)) 15.623 + add_to_blkdev_list_tail(blkif); 15.624 + blkif_put(blkif); 15.625 + } 15.626 + } 15.627 } 15.628 15.629 static void maybe_trigger_blkio_schedule(void) 15.630 { 15.631 - /* 15.632 - * Needed so that two processes, who together make the following predicate 15.633 - * true, don't both read stale values and evaluate the predicate 15.634 - * incorrectly. Incredibly unlikely to stall the scheduler on x86, but... 15.635 - */ 15.636 - smp_mb(); 15.637 + /* 15.638 + * Needed so that two processes, who together make the following 15.639 + * predicate true, don't both read stale values and evaluate the 15.640 + * predicate incorrectly. Incredibly unlikely to stall the scheduler 15.641 + * on the x86, but... 15.642 + */ 15.643 + smp_mb(); 15.644 15.645 - if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) && 15.646 - !list_empty(&blkio_schedule_list) ) 15.647 - wake_up(&blkio_schedule_wait); 15.648 + if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) && 15.649 + !list_empty(&blkio_schedule_list)) 15.650 + wake_up(&blkio_schedule_wait); 15.651 } 15.652 15.653 15.654 @@ -561,54 +556,53 @@ static void maybe_trigger_blkio_schedule 15.655 15.656 static int blktap_read_ufe_ring(void) 15.657 { 15.658 - /* This is called to read responses from the UFE ring. */ 15.659 + /* This is called to read responses from the UFE ring. 
*/ 15.660 15.661 - RING_IDX i, j, rp; 15.662 - blkif_response_t *resp; 15.663 - blkif_t *blkif; 15.664 - int pending_idx; 15.665 - pending_req_t *pending_req; 15.666 - unsigned long flags; 15.667 + RING_IDX i, j, rp; 15.668 + blkif_response_t *resp; 15.669 + blkif_t *blkif; 15.670 + int pending_idx; 15.671 + pending_req_t *pending_req; 15.672 + unsigned long flags; 15.673 15.674 - /* if we are forwarding from UFERring to FERing */ 15.675 - if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) { 15.676 + /* if we are forwarding from UFERring to FERing */ 15.677 + if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) { 15.678 15.679 - /* for each outstanding message on the UFEring */ 15.680 - rp = blktap_ufe_ring.sring->rsp_prod; 15.681 - rmb(); 15.682 + /* for each outstanding message on the UFEring */ 15.683 + rp = blktap_ufe_ring.sring->rsp_prod; 15.684 + rmb(); 15.685 15.686 - for ( i = blktap_ufe_ring.rsp_cons; i != rp; i++ ) 15.687 - { 15.688 - resp = RING_GET_RESPONSE(&blktap_ufe_ring, i); 15.689 - pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id)); 15.690 - pending_req = &pending_reqs[pending_idx]; 15.691 + for (i = blktap_ufe_ring.rsp_cons; i != rp; i++) { 15.692 + resp = RING_GET_RESPONSE(&blktap_ufe_ring, i); 15.693 + pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id)); 15.694 + pending_req = &pending_reqs[pending_idx]; 15.695 15.696 - blkif = pending_req->blkif; 15.697 - for (j = 0; j < pending_req->nr_pages; j++) { 15.698 - unsigned long vaddr; 15.699 - struct page **map = blktap_vma->vm_private_data; 15.700 - int offset; 15.701 + blkif = pending_req->blkif; 15.702 + for (j = 0; j < pending_req->nr_pages; j++) { 15.703 + unsigned long vaddr; 15.704 + struct page **map = blktap_vma->vm_private_data; 15.705 + int offset; 15.706 15.707 - vaddr = MMAP_VADDR(user_vstart, pending_idx, j); 15.708 - offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT; 15.709 + vaddr = MMAP_VADDR(user_vstart, pending_idx, j); 15.710 + offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT; 15.711 15.712 - //ClearPageReserved(virt_to_page(vaddr)); 15.713 - ClearPageReserved((struct page *)map[offset]); 15.714 - map[offset] = NULL; 15.715 - } 15.716 + //ClearPageReserved(virt_to_page(vaddr)); 15.717 + ClearPageReserved((struct page *)map[offset]); 15.718 + map[offset] = NULL; 15.719 + } 15.720 15.721 - fast_flush_area(pending_idx, pending_req->nr_pages); 15.722 - make_response(blkif, pending_req->id, resp->operation, 15.723 - resp->status); 15.724 - blkif_put(pending_req->blkif); 15.725 - spin_lock_irqsave(&pend_prod_lock, flags); 15.726 - pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; 15.727 - spin_unlock_irqrestore(&pend_prod_lock, flags); 15.728 - } 15.729 - blktap_ufe_ring.rsp_cons = i; 15.730 - maybe_trigger_blkio_schedule(); 15.731 - } 15.732 - return 0; 15.733 + fast_flush_area(pending_idx, pending_req->nr_pages); 15.734 + make_response(blkif, pending_req->id, resp->operation, 15.735 + resp->status); 15.736 + blkif_put(pending_req->blkif); 15.737 + spin_lock_irqsave(&pend_prod_lock, flags); 15.738 + pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; 15.739 + spin_unlock_irqrestore(&pend_prod_lock, flags); 15.740 + } 15.741 + blktap_ufe_ring.rsp_cons = i; 15.742 + maybe_trigger_blkio_schedule(); 15.743 + } 15.744 + return 0; 15.745 } 15.746 15.747 15.748 @@ -618,10 +612,10 @@ static int blktap_read_ufe_ring(void) 15.749 15.750 irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs) 15.751 { 15.752 - blkif_t *blkif = dev_id; 15.753 - add_to_blkdev_list_tail(blkif); 15.754 - 
maybe_trigger_blkio_schedule(); 15.755 - return IRQ_HANDLED; 15.756 + blkif_t *blkif = dev_id; 15.757 + add_to_blkdev_list_tail(blkif); 15.758 + maybe_trigger_blkio_schedule(); 15.759 + return IRQ_HANDLED; 15.760 } 15.761 15.762 15.763 @@ -632,199 +626,194 @@ irqreturn_t blkif_be_int(int irq, void * 15.764 15.765 static int do_block_io_op(blkif_t *blkif, int max_to_do) 15.766 { 15.767 - blkif_back_ring_t *blk_ring = &blkif->blk_ring; 15.768 - blkif_request_t *req; 15.769 - RING_IDX i, rp; 15.770 - int more_to_do = 0; 15.771 + blkif_back_ring_t *blk_ring = &blkif->blk_ring; 15.772 + blkif_request_t *req; 15.773 + RING_IDX i, rp; 15.774 + int more_to_do = 0; 15.775 15.776 - rp = blk_ring->sring->req_prod; 15.777 - rmb(); /* Ensure we see queued requests up to 'rp'. */ 15.778 + rp = blk_ring->sring->req_prod; 15.779 + rmb(); /* Ensure we see queued requests up to 'rp'. */ 15.780 15.781 - for ( i = blk_ring->req_cons; 15.782 - (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i); 15.783 - i++ ) 15.784 - { 15.785 - if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) ) 15.786 - { 15.787 - more_to_do = 1; 15.788 - break; 15.789 - } 15.790 + for (i = blk_ring->req_cons; 15.791 + (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i); 15.792 + i++ ) { 15.793 + if ((max_to_do-- == 0) || 15.794 + (NR_PENDING_REQS == MAX_PENDING_REQS)) { 15.795 + more_to_do = 1; 15.796 + break; 15.797 + } 15.798 15.799 - req = RING_GET_REQUEST(blk_ring, i); 15.800 - switch ( req->operation ) 15.801 - { 15.802 - case BLKIF_OP_READ: 15.803 - case BLKIF_OP_WRITE: 15.804 - dispatch_rw_block_io(blkif, req); 15.805 - break; 15.806 + req = RING_GET_REQUEST(blk_ring, i); 15.807 + switch (req->operation) { 15.808 + case BLKIF_OP_READ: 15.809 + case BLKIF_OP_WRITE: 15.810 + dispatch_rw_block_io(blkif, req); 15.811 + break; 15.812 15.813 - default: 15.814 - DPRINTK("error: unknown block io operation [%d]\n", 15.815 - req->operation); 15.816 - make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR); 15.817 - break; 15.818 - } 15.819 - } 15.820 + default: 15.821 + DPRINTK("error: unknown block io operation [%d]\n", 15.822 + req->operation); 15.823 + make_response(blkif, req->id, req->operation, 15.824 + BLKIF_RSP_ERROR); 15.825 + break; 15.826 + } 15.827 + } 15.828 15.829 - blk_ring->req_cons = i; 15.830 - blktap_kick_user(); 15.831 + blk_ring->req_cons = i; 15.832 + blktap_kick_user(); 15.833 15.834 - return more_to_do; 15.835 + return more_to_do; 15.836 } 15.837 15.838 static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req) 15.839 { 15.840 - blkif_request_t *target; 15.841 - int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)]; 15.842 - pending_req_t *pending_req; 15.843 - struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2]; 15.844 - int op, ret; 15.845 - unsigned int nseg; 15.846 + blkif_request_t *target; 15.847 + int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)]; 15.848 + pending_req_t *pending_req; 15.849 + struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2]; 15.850 + int op, ret; 15.851 + unsigned int nseg; 15.852 15.853 - /* Check that number of segments is sane. */ 15.854 - nseg = req->nr_segments; 15.855 - if ( unlikely(nseg == 0) || 15.856 - unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) 15.857 - { 15.858 - DPRINTK("Bad number of segments in request (%d)\n", nseg); 15.859 - goto bad_descriptor; 15.860 - } 15.861 + /* Check that number of segments is sane. 
*/ 15.862 + nseg = req->nr_segments; 15.863 + if (unlikely(nseg == 0) || 15.864 + unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { 15.865 + DPRINTK("Bad number of segments in request (%d)\n", nseg); 15.866 + goto bad_descriptor; 15.867 + } 15.868 15.869 - /* Make sure userspace is ready. */ 15.870 - if (!blktap_ring_ok) { 15.871 - DPRINTK("blktap: ring not ready for requests!\n"); 15.872 - goto bad_descriptor; 15.873 - } 15.874 + /* Make sure userspace is ready. */ 15.875 + if (!blktap_ring_ok) { 15.876 + DPRINTK("blktap: ring not ready for requests!\n"); 15.877 + goto bad_descriptor; 15.878 + } 15.879 15.880 15.881 - if ( RING_FULL(&blktap_ufe_ring) ) { 15.882 - WPRINTK("blktap: fe_ring is full, can't add (very broken!).\n"); 15.883 - goto bad_descriptor; 15.884 - } 15.885 - 15.886 - flush_cache_all(); /* a noop on intel... */ 15.887 + if (RING_FULL(&blktap_ufe_ring)) { 15.888 + WPRINTK("blktap: fe_ring is full, can't add " 15.889 + "(very broken!).\n"); 15.890 + goto bad_descriptor; 15.891 + } 15.892 15.893 - /* Map the foreign pages directly in to the application */ 15.894 - op = 0; 15.895 - for (i=0; i<req->nr_segments; i++) { 15.896 - 15.897 - unsigned long uvaddr; 15.898 - unsigned long kvaddr; 15.899 - unsigned long ptep; 15.900 + flush_cache_all(); /* a noop on intel... */ 15.901 15.902 - uvaddr = MMAP_VADDR(user_vstart, pending_idx, i); 15.903 - kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i); 15.904 + /* Map the foreign pages directly in to the application */ 15.905 + op = 0; 15.906 + for (i = 0; i < req->nr_segments; i++) { 15.907 15.908 - /* Map the remote page to kernel. */ 15.909 - map[op].host_addr = kvaddr; 15.910 - map[op].dom = blkif->domid; 15.911 - map[op].ref = blkif_gref_from_fas(req->frame_and_sects[i]); 15.912 - map[op].flags = GNTMAP_host_map; 15.913 - /* This needs a bit more thought in terms of interposition: 15.914 - * If we want to be able to modify pages during write using 15.915 - * grant table mappings, the guest will either need to allow 15.916 - * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */ 15.917 - if (req->operation == BLKIF_OP_WRITE) 15.918 - map[op].flags |= GNTMAP_readonly; 15.919 - op++; 15.920 + unsigned long uvaddr; 15.921 + unsigned long kvaddr; 15.922 + unsigned long ptep; 15.923 + 15.924 + uvaddr = MMAP_VADDR(user_vstart, pending_idx, i); 15.925 + kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i); 15.926 15.927 - /* Now map it to user. */ 15.928 - ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep); 15.929 - if (ret) 15.930 - { 15.931 - DPRINTK("Couldn't get a pte addr!\n"); 15.932 - fast_flush_area(pending_idx, req->nr_segments); 15.933 - goto bad_descriptor; 15.934 - } 15.935 + /* Map the remote page to kernel. */ 15.936 + map[op].host_addr = kvaddr; 15.937 + map[op].dom = blkif->domid; 15.938 + map[op].ref = blkif_gref_from_fas(req->frame_and_sects[i]); 15.939 + map[op].flags = GNTMAP_host_map; 15.940 + /* This needs a bit more thought in terms of interposition: 15.941 + * If we want to be able to modify pages during write using 15.942 + * grant table mappings, the guest will either need to allow 15.943 + * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */ 15.944 + if (req->operation == BLKIF_OP_WRITE) 15.945 + map[op].flags |= GNTMAP_readonly; 15.946 + op++; 15.947 + 15.948 + /* Now map it to user. 
*/ 15.949 + ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep); 15.950 + if (ret) { 15.951 + DPRINTK("Couldn't get a pte addr!\n"); 15.952 + fast_flush_area(pending_idx, req->nr_segments); 15.953 + goto bad_descriptor; 15.954 + } 15.955 15.956 - map[op].host_addr = ptep; 15.957 - map[op].dom = blkif->domid; 15.958 - map[op].ref = blkif_gref_from_fas(req->frame_and_sects[i]); 15.959 - map[op].flags = GNTMAP_host_map | GNTMAP_application_map 15.960 - | GNTMAP_contains_pte; 15.961 - /* Above interposition comment applies here as well. */ 15.962 - if (req->operation == BLKIF_OP_WRITE) 15.963 - map[op].flags |= GNTMAP_readonly; 15.964 - op++; 15.965 - } 15.966 + map[op].host_addr = ptep; 15.967 + map[op].dom = blkif->domid; 15.968 + map[op].ref = blkif_gref_from_fas(req->frame_and_sects[i]); 15.969 + map[op].flags = GNTMAP_host_map | GNTMAP_application_map 15.970 + | GNTMAP_contains_pte; 15.971 + /* Above interposition comment applies here as well. */ 15.972 + if (req->operation == BLKIF_OP_WRITE) 15.973 + map[op].flags |= GNTMAP_readonly; 15.974 + op++; 15.975 + } 15.976 15.977 - if ( unlikely(HYPERVISOR_grant_table_op( 15.978 - GNTTABOP_map_grant_ref, map, op))) 15.979 - BUG(); 15.980 + BUG_ON(HYPERVISOR_grant_table_op( 15.981 + GNTTABOP_map_grant_ref, map, op)); 15.982 15.983 - op = 0; 15.984 - for (i=0; i<(req->nr_segments*2); i+=2) { 15.985 - unsigned long uvaddr; 15.986 - unsigned long kvaddr; 15.987 - unsigned long offset; 15.988 - int cancel = 0; 15.989 + op = 0; 15.990 + for (i = 0; i < (req->nr_segments*2); i += 2) { 15.991 + unsigned long uvaddr; 15.992 + unsigned long kvaddr; 15.993 + unsigned long offset; 15.994 + int cancel = 0; 15.995 15.996 - uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2); 15.997 - kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2); 15.998 + uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2); 15.999 + kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2); 15.1000 15.1001 - if ( unlikely(map[i].handle < 0) ) 15.1002 - { 15.1003 - DPRINTK("Error on kernel grant mapping (%d)\n", map[i].handle); 15.1004 - ret = map[i].handle; 15.1005 - cancel = 1; 15.1006 - } 15.1007 + if (unlikely(map[i].handle < 0)) { 15.1008 + DPRINTK("Error on kernel grant mapping (%d)\n", 15.1009 + map[i].handle); 15.1010 + ret = map[i].handle; 15.1011 + cancel = 1; 15.1012 + } 15.1013 15.1014 - if ( unlikely(map[i+1].handle < 0) ) 15.1015 - { 15.1016 - DPRINTK("Error on user grant mapping (%d)\n", map[i+1].handle); 15.1017 - ret = map[i+1].handle; 15.1018 - cancel = 1; 15.1019 - } 15.1020 + if (unlikely(map[i+1].handle < 0)) { 15.1021 + DPRINTK("Error on user grant mapping (%d)\n", 15.1022 + map[i+1].handle); 15.1023 + ret = map[i+1].handle; 15.1024 + cancel = 1; 15.1025 + } 15.1026 15.1027 - if (cancel) 15.1028 - { 15.1029 - fast_flush_area(pending_idx, req->nr_segments); 15.1030 - goto bad_descriptor; 15.1031 - } 15.1032 + if (cancel) { 15.1033 + fast_flush_area(pending_idx, req->nr_segments); 15.1034 + goto bad_descriptor; 15.1035 + } 15.1036 15.1037 - /* Set the necessary mappings in p2m and in the VM_FOREIGN 15.1038 - * vm_area_struct to allow user vaddr -> struct page lookups 15.1039 - * to work. This is needed for direct IO to foreign pages. */ 15.1040 - phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] = 15.1041 - FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT); 15.1042 + /* Set the necessary mappings in p2m and in the VM_FOREIGN 15.1043 + * vm_area_struct to allow user vaddr -> struct page lookups 15.1044 + * to work. This is needed for direct IO to foreign pages. 
*/ 15.1045 + phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] = 15.1046 + FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT); 15.1047 15.1048 - offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT; 15.1049 - ((struct page **)blktap_vma->vm_private_data)[offset] = 15.1050 - pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT); 15.1051 + offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT; 15.1052 + ((struct page **)blktap_vma->vm_private_data)[offset] = 15.1053 + pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT); 15.1054 15.1055 - /* Save handles for unmapping later. */ 15.1056 - pending_handle(pending_idx, i/2).kernel = map[i].handle; 15.1057 - pending_handle(pending_idx, i/2).user = map[i+1].handle; 15.1058 - } 15.1059 + /* Save handles for unmapping later. */ 15.1060 + pending_handle(pending_idx, i/2).kernel = map[i].handle; 15.1061 + pending_handle(pending_idx, i/2).user = map[i+1].handle; 15.1062 + } 15.1063 15.1064 - /* Mark mapped pages as reserved: */ 15.1065 - for ( i = 0; i < req->nr_segments; i++ ) 15.1066 - { 15.1067 - unsigned long kvaddr; 15.1068 + /* Mark mapped pages as reserved: */ 15.1069 + for (i = 0; i < req->nr_segments; i++) { 15.1070 + unsigned long kvaddr; 15.1071 + kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i); 15.1072 + SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT)); 15.1073 + } 15.1074 15.1075 - kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i); 15.1076 - SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT)); 15.1077 - } 15.1078 + pending_req = &pending_reqs[pending_idx]; 15.1079 + pending_req->blkif = blkif; 15.1080 + pending_req->id = req->id; 15.1081 + pending_req->operation = req->operation; 15.1082 + pending_req->status = BLKIF_RSP_OKAY; 15.1083 + pending_req->nr_pages = nseg; 15.1084 + req->id = MAKE_ID(blkif->domid, pending_idx); 15.1085 + //atomic_set(&pending_req->pendcnt, nbio); 15.1086 + pending_cons++; 15.1087 + blkif_get(blkif); 15.1088 15.1089 - pending_req = &pending_reqs[pending_idx]; 15.1090 - pending_req->blkif = blkif; 15.1091 - pending_req->id = req->id; 15.1092 - pending_req->operation = req->operation; 15.1093 - pending_req->status = BLKIF_RSP_OKAY; 15.1094 - pending_req->nr_pages = nseg; 15.1095 - req->id = MAKE_ID(blkif->domid, pending_idx); 15.1096 - //atomic_set(&pending_req->pendcnt, nbio); 15.1097 - pending_cons++; 15.1098 - blkif_get(blkif); 15.1099 - 15.1100 - /* Finally, write the request message to the user ring. */ 15.1101 - target = RING_GET_REQUEST(&blktap_ufe_ring, blktap_ufe_ring.req_prod_pvt); 15.1102 - memcpy(target, req, sizeof(*req)); 15.1103 - blktap_ufe_ring.req_prod_pvt++; 15.1104 - return; 15.1105 + /* Finally, write the request message to the user ring. 
*/ 15.1106 + target = RING_GET_REQUEST(&blktap_ufe_ring, 15.1107 + blktap_ufe_ring.req_prod_pvt); 15.1108 + memcpy(target, req, sizeof(*req)); 15.1109 + blktap_ufe_ring.req_prod_pvt++; 15.1110 + return; 15.1111 15.1112 bad_descriptor: 15.1113 - make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR); 15.1114 + make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR); 15.1115 } 15.1116 15.1117 15.1118 @@ -837,80 +826,89 @@ static void dispatch_rw_block_io(blkif_t 15.1119 static void make_response(blkif_t *blkif, unsigned long id, 15.1120 unsigned short op, int st) 15.1121 { 15.1122 - blkif_response_t *resp; 15.1123 - unsigned long flags; 15.1124 - blkif_back_ring_t *blk_ring = &blkif->blk_ring; 15.1125 + blkif_response_t *resp; 15.1126 + unsigned long flags; 15.1127 + blkif_back_ring_t *blk_ring = &blkif->blk_ring; 15.1128 15.1129 - /* Place on the response ring for the relevant domain. */ 15.1130 - spin_lock_irqsave(&blkif->blk_ring_lock, flags); 15.1131 - resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt); 15.1132 - resp->id = id; 15.1133 - resp->operation = op; 15.1134 - resp->status = st; 15.1135 - wmb(); /* Ensure other side can see the response fields. */ 15.1136 - blk_ring->rsp_prod_pvt++; 15.1137 - RING_PUSH_RESPONSES(blk_ring); 15.1138 - spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); 15.1139 + /* Place on the response ring for the relevant domain. */ 15.1140 + spin_lock_irqsave(&blkif->blk_ring_lock, flags); 15.1141 + resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt); 15.1142 + resp->id = id; 15.1143 + resp->operation = op; 15.1144 + resp->status = st; 15.1145 + wmb(); /* Ensure other side can see the response fields. */ 15.1146 + blk_ring->rsp_prod_pvt++; 15.1147 + RING_PUSH_RESPONSES(blk_ring); 15.1148 + spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); 15.1149 15.1150 - /* Kick the relevant domain. */ 15.1151 - notify_via_evtchn(blkif->evtchn); 15.1152 + /* Kick the relevant domain. 
*/ 15.1153 + notify_via_evtchn(blkif->evtchn); 15.1154 } 15.1155 15.1156 static struct miscdevice blktap_miscdev = { 15.1157 - .minor = BLKTAP_MINOR, 15.1158 - .name = "blktap", 15.1159 - .fops = &blktap_fops, 15.1160 - .devfs_name = "misc/blktap", 15.1161 + .minor = BLKTAP_MINOR, 15.1162 + .name = "blktap", 15.1163 + .fops = &blktap_fops, 15.1164 + .devfs_name = "misc/blktap", 15.1165 }; 15.1166 15.1167 void blkif_deschedule(blkif_t *blkif) 15.1168 { 15.1169 - remove_from_blkdev_list(blkif); 15.1170 + remove_from_blkdev_list(blkif); 15.1171 } 15.1172 15.1173 static int __init blkif_init(void) 15.1174 { 15.1175 - int i, j, err; 15.1176 - struct page *page; 15.1177 + int i, j, err; 15.1178 + struct page *page; 15.1179 /* 15.1180 - if ( !(xen_start_info->flags & SIF_INITDOMAIN) && 15.1181 - !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) ) 15.1182 - return 0; 15.1183 + if ( !(xen_start_info->flags & SIF_INITDOMAIN) && 15.1184 + !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) ) 15.1185 + return 0; 15.1186 */ 15.1187 - blkif_interface_init(); 15.1188 + blkif_interface_init(); 15.1189 15.1190 - page = balloon_alloc_empty_page_range(MMAP_PAGES); 15.1191 - BUG_ON(page == NULL); 15.1192 - mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); 15.1193 + page = balloon_alloc_empty_page_range(MMAP_PAGES); 15.1194 + BUG_ON(page == NULL); 15.1195 + mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); 15.1196 15.1197 - pending_cons = 0; 15.1198 - pending_prod = MAX_PENDING_REQS; 15.1199 - memset(pending_reqs, 0, sizeof(pending_reqs)); 15.1200 - for ( i = 0; i < MAX_PENDING_REQS; i++ ) 15.1201 - pending_ring[i] = i; 15.1202 + pending_cons = 0; 15.1203 + pending_prod = MAX_PENDING_REQS; 15.1204 + memset(pending_reqs, 0, sizeof(pending_reqs)); 15.1205 + for ( i = 0; i < MAX_PENDING_REQS; i++ ) 15.1206 + pending_ring[i] = i; 15.1207 15.1208 - spin_lock_init(&blkio_schedule_list_lock); 15.1209 - INIT_LIST_HEAD(&blkio_schedule_list); 15.1210 + spin_lock_init(&blkio_schedule_list_lock); 15.1211 + INIT_LIST_HEAD(&blkio_schedule_list); 15.1212 + 15.1213 + BUG_ON(kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0); 15.1214 15.1215 - if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 ) 15.1216 - BUG(); 15.1217 + blkif_xenbus_init(); 15.1218 15.1219 - blkif_xenbus_init(); 15.1220 + for (i = 0; i < MAX_PENDING_REQS ; i++) 15.1221 + for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++) 15.1222 + BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j)); 15.1223 15.1224 - for (i=0; i<MAX_PENDING_REQS ; i++) 15.1225 - for (j=0; j<BLKIF_MAX_SEGMENTS_PER_REQUEST; j++) 15.1226 - BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j)); 15.1227 + err = misc_register(&blktap_miscdev); 15.1228 + if (err != 0) { 15.1229 + printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n", 15.1230 + err); 15.1231 + return err; 15.1232 + } 15.1233 15.1234 - err = misc_register(&blktap_miscdev); 15.1235 - if ( err != 0 ) 15.1236 - { 15.1237 - printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n", err); 15.1238 - return err; 15.1239 - } 15.1240 + init_waitqueue_head(&blktap_wait); 15.1241 15.1242 - init_waitqueue_head(&blktap_wait); 15.1243 - 15.1244 - return 0; 15.1245 + return 0; 15.1246 } 15.1247 15.1248 __initcall(blkif_init); 15.1249 + 15.1250 +/* 15.1251 + * Local variables: 15.1252 + * c-file-style: "linux" 15.1253 + * indent-tabs-mode: t 15.1254 + * c-indent-level: 8 15.1255 + * c-basic-offset: 8 15.1256 + * tab-width: 8 15.1257 + * End: 15.1258 + */
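The blktap.c hunk above preserves a small user-space protocol around the misc device: a tap process opens the device, mmap()s exactly (RING_PAGES + MMAP_PAGES) pages so the shared ring page sits at the start of the mapping (blktap_mmap refuses any other size), selects BLKTAP_MODE_INTERCEPT_FE with BLKTAP_IOCTL_SETMODE, sleeps in poll() until blktap_poll() pushes queued requests and returns POLLIN, and acknowledges its responses with BLKTAP_IOCTL_KICK_FE, which invokes blktap_read_ufe_ring(). The skeleton below is only an illustrative sketch of that loop, not part of the changeset: the BLKTAP_* constants, MMAP_PAGES and RING_PAGES are assumed to come from the blktap header (represented by a placeholder include), and the device node may appear as /dev/blktap or /dev/misc/blktap depending on how the misc device is populated.

    /* Hypothetical user-space skeleton for the blktap interface shown above.
     * BLKTAP_IOCTL_*, BLKTAP_MODE_INTERCEPT_FE, MMAP_PAGES and RING_PAGES are
     * assumed to come from the blktap header; they are not part of this diff. */
    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include "blktap_user.h"          /* placeholder for the real blktap header */

    int main(void)
    {
            int fd = open("/dev/blktap", O_RDWR);   /* or /dev/misc/blktap */
            if (fd < 0) {
                    perror("open blktap");
                    return 1;
            }

            /* blktap_mmap() insists on exactly MMAP_PAGES + RING_PAGES pages;
             * the shared ring page sits at the start of the mapping. */
            size_t len = (size_t)(MMAP_PAGES + RING_PAGES) * getpagesize();
            void *area = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (area == MAP_FAILED) {
                    perror("mmap blktap");
                    return 1;
            }

            /* Ask the driver to forward frontend requests to this process. */
            if (ioctl(fd, BLKTAP_IOCTL_SETMODE, BLKTAP_MODE_INTERCEPT_FE) < 0)
                    perror("ioctl SETMODE");

            for (;;) {
                    struct pollfd pfd = { .fd = fd, .events = POLLIN };

                    /* blktap_poll() pushes queued requests and returns POLLIN. */
                    if (poll(&pfd, 1, -1) <= 0)
                            continue;

                    /* ... consume requests from the shared ring mapped at 'area'
                     * and queue responses (ring handling elided in this sketch) ... */

                    /* Tell the kernel there are responses to read back. */
                    ioctl(fd, BLKTAP_IOCTL_KICK_FE, 0);
            }
    }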
16.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/common.h Thu Sep 22 16:05:44 2005 +0100 16.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/common.h Thu Sep 22 16:12:14 2005 +0100 16.3 @@ -33,39 +33,39 @@ 16.4 #define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args) 16.5 16.6 struct vbd { 16.7 - blkif_vdev_t handle; /* what the domain refers to this vbd as */ 16.8 - unsigned char readonly; /* Non-zero -> read-only */ 16.9 - unsigned char type; /* VDISK_xxx */ 16.10 - u32 pdevice; /* phys device that this vbd maps to */ 16.11 - struct block_device *bdev; 16.12 + blkif_vdev_t handle; /* what the domain refers to this vbd as */ 16.13 + unsigned char readonly; /* Non-zero -> read-only */ 16.14 + unsigned char type; /* VDISK_xxx */ 16.15 + u32 pdevice; /* phys device that this vbd maps to */ 16.16 + struct block_device *bdev; 16.17 }; 16.18 16.19 typedef struct blkif_st { 16.20 - /* Unique identifier for this interface. */ 16.21 - domid_t domid; 16.22 - unsigned int handle; 16.23 - /* Physical parameters of the comms window. */ 16.24 - unsigned int evtchn; 16.25 - unsigned int remote_evtchn; 16.26 - /* Comms information. */ 16.27 - blkif_back_ring_t blk_ring; 16.28 - struct vm_struct *blk_ring_area; 16.29 - /* VBDs attached to this interface. */ 16.30 - struct vbd vbd; 16.31 - /* Private fields. */ 16.32 - enum { DISCONNECTED, CONNECTED } status; 16.33 + /* Unique identifier for this interface. */ 16.34 + domid_t domid; 16.35 + unsigned int handle; 16.36 + /* Physical parameters of the comms window. */ 16.37 + unsigned int evtchn; 16.38 + unsigned int remote_evtchn; 16.39 + /* Comms information. */ 16.40 + blkif_back_ring_t blk_ring; 16.41 + struct vm_struct *blk_ring_area; 16.42 + /* VBDs attached to this interface. */ 16.43 + struct vbd vbd; 16.44 + /* Private fields. */ 16.45 + enum { DISCONNECTED, CONNECTED } status; 16.46 #ifdef CONFIG_XEN_BLKDEV_TAP_BE 16.47 - /* Is this a blktap frontend */ 16.48 - unsigned int is_blktap; 16.49 + /* Is this a blktap frontend */ 16.50 + unsigned int is_blktap; 16.51 #endif 16.52 - struct list_head blkdev_list; 16.53 - spinlock_t blk_ring_lock; 16.54 - atomic_t refcnt; 16.55 + struct list_head blkdev_list; 16.56 + spinlock_t blk_ring_lock; 16.57 + atomic_t refcnt; 16.58 16.59 - struct work_struct free_work; 16.60 + struct work_struct free_work; 16.61 16.62 - u16 shmem_handle; 16.63 - grant_ref_t shmem_ref; 16.64 + u16 shmem_handle; 16.65 + grant_ref_t shmem_ref; 16.66 } blkif_t; 16.67 16.68 blkif_t *alloc_blkif(domid_t domid); 16.69 @@ -89,10 +89,10 @@ unsigned int vbd_info(struct vbd *vbd); 16.70 unsigned long vbd_secsize(struct vbd *vbd); 16.71 16.72 struct phys_req { 16.73 - unsigned short dev; 16.74 - unsigned short nr_sects; 16.75 - struct block_device *bdev; 16.76 - blkif_sector_t sector_number; 16.77 + unsigned short dev; 16.78 + unsigned short nr_sects; 16.79 + struct block_device *bdev; 16.80 + blkif_sector_t sector_number; 16.81 }; 16.82 16.83 int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 16.84 @@ -106,3 +106,13 @@ void blkif_xenbus_init(void); 16.85 irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs); 16.86 16.87 #endif /* __BLKIF__BACKEND__COMMON_H__ */ 16.88 + 16.89 +/* 16.90 + * Local variables: 16.91 + * c-file-style: "linux" 16.92 + * indent-tabs-mode: t 16.93 + * c-indent-level: 8 16.94 + * c-basic-offset: 8 16.95 + * tab-width: 8 16.96 + * End: 16.97 + */
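The blkif_t structure reformatted above also anchors the reference counting used by the scheduling-list code earlier in this changeset: add_to_blkdev_list_tail() takes a reference with blkif_get() before queueing an interface for the xenblkd thread, and remove_from_blkdev_list() drops it with blkif_put(), so an interface sitting on blkio_schedule_list cannot be torn down while it still has queued work. The helpers themselves are defined in the interface code rather than in this diff; the sketch below only illustrates how they sit on top of the refcnt and free_work fields shown above, and the teardown hook name is an assumption.

    /* Illustrative only -- the real definitions live in the blktap/blkback
     * interface code, not in this changeset.  free_blkif_callback() stands in
     * for whatever routine schedules blkif_t.free_work for deferred teardown. */
    #define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
    #define blkif_put(_b)					\
    	do {						\
    		if (atomic_dec_and_test(&(_b)->refcnt))	\
    			free_blkif_callback(_b);	\
    	} while (0)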
17.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c Thu Sep 22 16:05:44 2005 +0100 17.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c Thu Sep 22 16:12:14 2005 +0100 17.3 @@ -222,3 +222,13 @@ void blkif_xenbus_init(void) 17.4 { 17.5 xenbus_register_backend(&blkback); 17.6 } 17.7 + 17.8 +/* 17.9 + * Local variables: 17.10 + * c-file-style: "linux" 17.11 + * indent-tabs-mode: t 17.12 + * c-indent-level: 8 17.13 + * c-basic-offset: 8 17.14 + * tab-width: 8 17.15 + * End: 17.16 + */
18.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/console.c Thu Sep 22 16:05:44 2005 +0100 18.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c Thu Sep 22 16:12:14 2005 +0100 18.3 @@ -75,31 +75,33 @@ extern int sysrq_enabled; 18.4 18.5 static int __init xencons_setup(char *str) 18.6 { 18.7 - char *q; 18.8 - int n; 18.9 + char *q; 18.10 + int n; 18.11 18.12 - if ( !strncmp(str, "ttyS", 4) ) 18.13 - xc_mode = XC_SERIAL; 18.14 - else if ( !strncmp(str, "tty", 3) ) 18.15 - xc_mode = XC_TTY; 18.16 - else if ( !strncmp(str, "off", 3) ) 18.17 - xc_mode = XC_OFF; 18.18 + if (!strncmp(str, "ttyS", 4)) 18.19 + xc_mode = XC_SERIAL; 18.20 + else if (!strncmp(str, "tty", 3)) 18.21 + xc_mode = XC_TTY; 18.22 + else if (!strncmp(str, "off", 3)) 18.23 + xc_mode = XC_OFF; 18.24 18.25 - switch ( xc_mode ) 18.26 - { 18.27 - case XC_SERIAL: 18.28 - n = simple_strtol( str+4, &q, 10 ); 18.29 - if ( q > (str + 4) ) xc_num = n; 18.30 - break; 18.31 - case XC_TTY: 18.32 - n = simple_strtol( str+3, &q, 10 ); 18.33 - if ( q > (str + 3) ) xc_num = n; 18.34 - break; 18.35 - default: 18.36 - break; 18.37 - } 18.38 + switch ( xc_mode ) 18.39 + { 18.40 + case XC_SERIAL: 18.41 + n = simple_strtol(str+4, &q, 10); 18.42 + if (q > (str + 4)) 18.43 + xc_num = n; 18.44 + break; 18.45 + case XC_TTY: 18.46 + n = simple_strtol(str+3, &q, 10); 18.47 + if (q > (str + 3)) 18.48 + xc_num = n; 18.49 + break; 18.50 + default: 18.51 + break; 18.52 + } 18.53 18.54 - return 1; 18.55 + return 1; 18.56 } 18.57 __setup("xencons=", xencons_setup); 18.58 18.59 @@ -111,11 +113,11 @@ static unsigned int wc, wp; /* write_con 18.60 18.61 static int __init xencons_bufsz_setup(char *str) 18.62 { 18.63 - unsigned int goal; 18.64 - goal = simple_strtoul(str, NULL, 0); 18.65 - while ( wbuf_size < goal ) 18.66 - wbuf_size <<= 1; 18.67 - return 1; 18.68 + unsigned int goal; 18.69 + goal = simple_strtoul(str, NULL, 0); 18.70 + while (wbuf_size < goal) 18.71 + wbuf_size <<= 1; 18.72 + return 1; 18.73 } 18.74 __setup("xencons_bufsz=", xencons_bufsz_setup); 18.75 18.76 @@ -135,57 +137,55 @@ static struct tty_driver xencons_driver; 18.77 /******************** Kernel console driver ********************************/ 18.78 18.79 static void kcons_write( 18.80 - struct console *c, const char *s, unsigned int count) 18.81 + struct console *c, const char *s, unsigned int count) 18.82 { 18.83 - int i; 18.84 - unsigned long flags; 18.85 + int i; 18.86 + unsigned long flags; 18.87 18.88 - spin_lock_irqsave(&xencons_lock, flags); 18.89 + spin_lock_irqsave(&xencons_lock, flags); 18.90 18.91 - for ( i = 0; i < count; i++ ) 18.92 - { 18.93 - if ( (wp - wc) >= (wbuf_size - 1) ) 18.94 - break; 18.95 - if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' ) 18.96 - wbuf[WBUF_MASK(wp++)] = '\r'; 18.97 - } 18.98 + for (i = 0; i < count; i++) { 18.99 + if ((wp - wc) >= (wbuf_size - 1)) 18.100 + break; 18.101 + if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n') 18.102 + wbuf[WBUF_MASK(wp++)] = '\r'; 18.103 + } 18.104 18.105 - __xencons_tx_flush(); 18.106 + __xencons_tx_flush(); 18.107 18.108 - spin_unlock_irqrestore(&xencons_lock, flags); 18.109 + spin_unlock_irqrestore(&xencons_lock, flags); 18.110 } 18.111 18.112 static void kcons_write_dom0( 18.113 - struct console *c, const char *s, unsigned int count) 18.114 + struct console *c, const char *s, unsigned int count) 18.115 { 18.116 - int rc; 18.117 + int rc; 18.118 18.119 - while ( (count > 0) && 18.120 - ((rc = HYPERVISOR_console_io( 18.121 - CONSOLEIO_write, count, (char *)s)) > 0) ) 18.122 - { 18.123 - count -= rc; 18.124 - s += 
rc; 18.125 - } 18.126 + while ((count > 0) && 18.127 + ((rc = HYPERVISOR_console_io( 18.128 + CONSOLEIO_write, count, (char *)s)) > 0)) { 18.129 + count -= rc; 18.130 + s += rc; 18.131 + } 18.132 } 18.133 18.134 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 18.135 static struct tty_driver *kcons_device(struct console *c, int *index) 18.136 { 18.137 - *index = c->index; 18.138 - return xencons_driver; 18.139 + *index = c->index; 18.140 + return xencons_driver; 18.141 } 18.142 #else 18.143 static kdev_t kcons_device(struct console *c) 18.144 { 18.145 - return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1); 18.146 + return MKDEV(TTY_MAJOR, (xc_mode == XC_SERIAL) ? 64 : 1); 18.147 } 18.148 #endif 18.149 18.150 static struct console kcons_info = { 18.151 - .device = kcons_device, 18.152 - .flags = CON_PRINTBUFFER, 18.153 - .index = -1, 18.154 + .device = kcons_device, 18.155 + .flags = CON_PRINTBUFFER, 18.156 + .index = -1, 18.157 }; 18.158 18.159 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 18.160 @@ -196,44 +196,42 @@ static int __init xen_console_init(void) 18.161 void xen_console_init(void) 18.162 #endif 18.163 { 18.164 - if ( xen_start_info->flags & SIF_INITDOMAIN ) 18.165 - { 18.166 - if ( xc_mode == XC_DEFAULT ) 18.167 - xc_mode = XC_SERIAL; 18.168 - kcons_info.write = kcons_write_dom0; 18.169 + if (xen_start_info->flags & SIF_INITDOMAIN) { 18.170 + if (xc_mode == XC_DEFAULT) 18.171 + xc_mode = XC_SERIAL; 18.172 + kcons_info.write = kcons_write_dom0; 18.173 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 18.174 - if ( xc_mode == XC_SERIAL ) 18.175 - kcons_info.flags |= CON_ENABLED; 18.176 + if (xc_mode == XC_SERIAL) 18.177 + kcons_info.flags |= CON_ENABLED; 18.178 #endif 18.179 - } 18.180 - else 18.181 - { 18.182 - if ( xc_mode == XC_DEFAULT ) 18.183 - xc_mode = XC_TTY; 18.184 - kcons_info.write = kcons_write; 18.185 - } 18.186 + } else { 18.187 + if (xc_mode == XC_DEFAULT) 18.188 + xc_mode = XC_TTY; 18.189 + kcons_info.write = kcons_write; 18.190 + } 18.191 18.192 - switch ( xc_mode ) 18.193 - { 18.194 - case XC_SERIAL: 18.195 - strcpy(kcons_info.name, "ttyS"); 18.196 - if ( xc_num == -1 ) xc_num = 0; 18.197 - break; 18.198 + switch (xc_mode) { 18.199 + case XC_SERIAL: 18.200 + strcpy(kcons_info.name, "ttyS"); 18.201 + if (xc_num == -1) 18.202 + xc_num = 0; 18.203 + break; 18.204 18.205 - case XC_TTY: 18.206 - strcpy(kcons_info.name, "tty"); 18.207 - if ( xc_num == -1 ) xc_num = 1; 18.208 - break; 18.209 + case XC_TTY: 18.210 + strcpy(kcons_info.name, "tty"); 18.211 + if (xc_num == -1) 18.212 + xc_num = 1; 18.213 + break; 18.214 18.215 - default: 18.216 - return __RETCODE; 18.217 - } 18.218 + default: 18.219 + return __RETCODE; 18.220 + } 18.221 18.222 - wbuf = alloc_bootmem(wbuf_size); 18.223 + wbuf = alloc_bootmem(wbuf_size); 18.224 18.225 - register_console(&kcons_info); 18.226 + register_console(&kcons_info); 18.227 18.228 - return __RETCODE; 18.229 + return __RETCODE; 18.230 } 18.231 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 18.232 console_initcall(xen_console_init); 18.233 @@ -246,41 +244,40 @@ asmlinkage int xprintk(const char *fmt, 18.234 asmlinkage int xprintk(const char *fmt, ...) 
18.235 #endif 18.236 { 18.237 - va_list args; 18.238 - int printk_len; 18.239 - static char printk_buf[1024]; 18.240 + va_list args; 18.241 + int printk_len; 18.242 + static char printk_buf[1024]; 18.243 18.244 - /* Emit the output into the temporary buffer */ 18.245 - va_start(args, fmt); 18.246 - printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args); 18.247 - va_end(args); 18.248 + /* Emit the output into the temporary buffer */ 18.249 + va_start(args, fmt); 18.250 + printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args); 18.251 + va_end(args); 18.252 18.253 - /* Send the processed output directly to Xen. */ 18.254 - kcons_write_dom0(NULL, printk_buf, printk_len); 18.255 + /* Send the processed output directly to Xen. */ 18.256 + kcons_write_dom0(NULL, printk_buf, printk_len); 18.257 18.258 - return 0; 18.259 + return 0; 18.260 } 18.261 18.262 /*** Forcibly flush console data before dying. ***/ 18.263 void xencons_force_flush(void) 18.264 { 18.265 - int sz; 18.266 + int sz; 18.267 18.268 - /* Emergency console is synchronous, so there's nothing to flush. */ 18.269 - if ( xen_start_info->flags & SIF_INITDOMAIN ) 18.270 - return; 18.271 + /* Emergency console is synchronous, so there's nothing to flush. */ 18.272 + if (xen_start_info->flags & SIF_INITDOMAIN) 18.273 + return; 18.274 18.275 18.276 - /* Spin until console data is flushed through to the domain controller. */ 18.277 - while ( (wc != wp) ) 18.278 - { 18.279 - int sent = 0; 18.280 - if ( (sz = wp - wc) == 0 ) 18.281 - continue; 18.282 - sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz); 18.283 - if (sent > 0) 18.284 - wc += sent; 18.285 - } 18.286 + /* Spin until console data is flushed through to the daemon. */ 18.287 + while (wc != wp) { 18.288 + int sent = 0; 18.289 + if ((sz = wp - wc) == 0) 18.290 + continue; 18.291 + sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz); 18.292 + if (sent > 0) 18.293 + wc += sent; 18.294 + } 18.295 } 18.296 18.297 18.298 @@ -305,362 +302,358 @@ static char x_char; 18.299 /* Non-privileged receive callback. 
*/ 18.300 static void xencons_rx(char *buf, unsigned len, struct pt_regs *regs) 18.301 { 18.302 - int i; 18.303 - unsigned long flags; 18.304 + int i; 18.305 + unsigned long flags; 18.306 18.307 - spin_lock_irqsave(&xencons_lock, flags); 18.308 - if ( xencons_tty != NULL ) 18.309 - { 18.310 - for ( i = 0; i < len; i++ ) { 18.311 + spin_lock_irqsave(&xencons_lock, flags); 18.312 + if (xencons_tty == NULL) 18.313 + goto out; 18.314 + 18.315 + for (i = 0; i < len; i++) { 18.316 #ifdef CONFIG_MAGIC_SYSRQ 18.317 - if (sysrq_enabled) { 18.318 - if (buf[i] == '\x0f') { /* ^O */ 18.319 - sysrq_requested = jiffies; 18.320 - continue; /* don't print the sysrq key */ 18.321 - } else if (sysrq_requested) { 18.322 - unsigned long sysrq_timeout = sysrq_requested + HZ*2; 18.323 - sysrq_requested = 0; 18.324 - /* if it's been less than a timeout, do the sysrq */ 18.325 - if (time_before(jiffies, sysrq_timeout)) { 18.326 - spin_unlock_irqrestore(&xencons_lock, flags); 18.327 - handle_sysrq(buf[i], regs, xencons_tty); 18.328 - spin_lock_irqsave(&xencons_lock, flags); 18.329 - continue; 18.330 - } 18.331 - } 18.332 - } 18.333 + if (sysrq_enabled) { 18.334 + if (buf[i] == '\x0f') { /* ^O */ 18.335 + sysrq_requested = jiffies; 18.336 + continue; /* don't print the sysrq key */ 18.337 + } else if (sysrq_requested) { 18.338 + unsigned long sysrq_timeout = 18.339 + sysrq_requested + HZ*2; 18.340 + sysrq_requested = 0; 18.341 + if (time_before(jiffies, sysrq_timeout)) { 18.342 + spin_unlock_irqrestore( 18.343 + &xencons_lock, flags); 18.344 + handle_sysrq( 18.345 + buf[i], regs, xencons_tty); 18.346 + spin_lock_irqsave( 18.347 + &xencons_lock, flags); 18.348 + continue; 18.349 + } 18.350 + } 18.351 + } 18.352 #endif 18.353 - tty_insert_flip_char(xencons_tty, buf[i], 0); 18.354 - } 18.355 - tty_flip_buffer_push(xencons_tty); 18.356 - } 18.357 - spin_unlock_irqrestore(&xencons_lock, flags); 18.358 + tty_insert_flip_char(xencons_tty, buf[i], 0); 18.359 + } 18.360 + tty_flip_buffer_push(xencons_tty); 18.361 18.362 + out: 18.363 + spin_unlock_irqrestore(&xencons_lock, flags); 18.364 } 18.365 18.366 /* Privileged and non-privileged transmit worker. 
*/ 18.367 static void __xencons_tx_flush(void) 18.368 { 18.369 - int sz, work_done = 0; 18.370 + int sz, work_done = 0; 18.371 18.372 - if ( xen_start_info->flags & SIF_INITDOMAIN ) 18.373 - { 18.374 - if ( x_char ) 18.375 - { 18.376 - kcons_write_dom0(NULL, &x_char, 1); 18.377 - x_char = 0; 18.378 - work_done = 1; 18.379 - } 18.380 + if (xen_start_info->flags & SIF_INITDOMAIN) { 18.381 + if (x_char) { 18.382 + kcons_write_dom0(NULL, &x_char, 1); 18.383 + x_char = 0; 18.384 + work_done = 1; 18.385 + } 18.386 18.387 - while ( wc != wp ) 18.388 - { 18.389 - sz = wp - wc; 18.390 - if ( sz > (wbuf_size - WBUF_MASK(wc)) ) 18.391 - sz = wbuf_size - WBUF_MASK(wc); 18.392 - kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz); 18.393 - wc += sz; 18.394 - work_done = 1; 18.395 - } 18.396 - } 18.397 - else 18.398 - { 18.399 - while ( x_char ) 18.400 - { 18.401 - if (xencons_ring_send(&x_char, 1) == 1) { 18.402 - x_char = 0; 18.403 - work_done = 1; 18.404 - } 18.405 - } 18.406 + while (wc != wp) { 18.407 + sz = wp - wc; 18.408 + if (sz > (wbuf_size - WBUF_MASK(wc))) 18.409 + sz = wbuf_size - WBUF_MASK(wc); 18.410 + kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz); 18.411 + wc += sz; 18.412 + work_done = 1; 18.413 + } 18.414 + } else { 18.415 + while (x_char) { 18.416 + if (xencons_ring_send(&x_char, 1) == 1) { 18.417 + x_char = 0; 18.418 + work_done = 1; 18.419 + } 18.420 + } 18.421 18.422 - while ( wc != wp ) 18.423 - { 18.424 - int sent; 18.425 - sz = wp - wc; 18.426 - if ( sz > (wbuf_size - WBUF_MASK(wc)) ) 18.427 - sz = wbuf_size - WBUF_MASK(wc); 18.428 - sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz); 18.429 - if ( sent > 0 ) { 18.430 - wc += sent; 18.431 - work_done = 1; 18.432 - } 18.433 - } 18.434 - } 18.435 + while (wc != wp) { 18.436 + int sent; 18.437 + sz = wp - wc; 18.438 + if (sz > (wbuf_size - WBUF_MASK(wc))) 18.439 + sz = wbuf_size - WBUF_MASK(wc); 18.440 + sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz); 18.441 + if (sent > 0) { 18.442 + wc += sent; 18.443 + work_done = 1; 18.444 + } 18.445 + } 18.446 + } 18.447 18.448 - if ( work_done && (xencons_tty != NULL) ) 18.449 - { 18.450 - wake_up_interruptible(&xencons_tty->write_wait); 18.451 - if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && 18.452 - (xencons_tty->ldisc.write_wakeup != NULL) ) 18.453 - (xencons_tty->ldisc.write_wakeup)(xencons_tty); 18.454 - } 18.455 + if (work_done && (xencons_tty != NULL)) 18.456 + { 18.457 + wake_up_interruptible(&xencons_tty->write_wait); 18.458 + if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && 18.459 + (xencons_tty->ldisc.write_wakeup != NULL)) 18.460 + (xencons_tty->ldisc.write_wakeup)(xencons_tty); 18.461 + } 18.462 } 18.463 18.464 /* Privileged receive callback and transmit kicker. */ 18.465 static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id, 18.466 struct pt_regs *regs) 18.467 { 18.468 - static char rbuf[16]; 18.469 - int i, l; 18.470 - unsigned long flags; 18.471 + static char rbuf[16]; 18.472 + int i, l; 18.473 + unsigned long flags; 18.474 18.475 - spin_lock_irqsave(&xencons_lock, flags); 18.476 + spin_lock_irqsave(&xencons_lock, flags); 18.477 18.478 - if ( xencons_tty != NULL ) 18.479 - { 18.480 - /* Receive work. */ 18.481 - while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 ) 18.482 - for ( i = 0; i < l; i++ ) 18.483 - tty_insert_flip_char(xencons_tty, rbuf[i], 0); 18.484 - if ( xencons_tty->flip.count != 0 ) 18.485 - tty_flip_buffer_push(xencons_tty); 18.486 - } 18.487 + if (xencons_tty != NULL) 18.488 + { 18.489 + /* Receive work. 
*/ 18.490 + while ((l = HYPERVISOR_console_io( 18.491 + CONSOLEIO_read, 16, rbuf)) > 0) 18.492 + for (i = 0; i < l; i++) 18.493 + tty_insert_flip_char(xencons_tty, rbuf[i], 0); 18.494 + if (xencons_tty->flip.count != 0) 18.495 + tty_flip_buffer_push(xencons_tty); 18.496 + } 18.497 18.498 - /* Transmit work. */ 18.499 - __xencons_tx_flush(); 18.500 + /* Transmit work. */ 18.501 + __xencons_tx_flush(); 18.502 18.503 - spin_unlock_irqrestore(&xencons_lock, flags); 18.504 + spin_unlock_irqrestore(&xencons_lock, flags); 18.505 18.506 - return IRQ_HANDLED; 18.507 + return IRQ_HANDLED; 18.508 } 18.509 18.510 static int xencons_write_room(struct tty_struct *tty) 18.511 { 18.512 - return wbuf_size - (wp - wc); 18.513 + return wbuf_size - (wp - wc); 18.514 } 18.515 18.516 static int xencons_chars_in_buffer(struct tty_struct *tty) 18.517 { 18.518 - return wp - wc; 18.519 + return wp - wc; 18.520 } 18.521 18.522 static void xencons_send_xchar(struct tty_struct *tty, char ch) 18.523 { 18.524 - unsigned long flags; 18.525 + unsigned long flags; 18.526 18.527 - if ( TTY_INDEX(tty) != 0 ) 18.528 - return; 18.529 + if (TTY_INDEX(tty) != 0) 18.530 + return; 18.531 18.532 - spin_lock_irqsave(&xencons_lock, flags); 18.533 - x_char = ch; 18.534 - __xencons_tx_flush(); 18.535 - spin_unlock_irqrestore(&xencons_lock, flags); 18.536 + spin_lock_irqsave(&xencons_lock, flags); 18.537 + x_char = ch; 18.538 + __xencons_tx_flush(); 18.539 + spin_unlock_irqrestore(&xencons_lock, flags); 18.540 } 18.541 18.542 static void xencons_throttle(struct tty_struct *tty) 18.543 { 18.544 - if ( TTY_INDEX(tty) != 0 ) 18.545 - return; 18.546 + if (TTY_INDEX(tty) != 0) 18.547 + return; 18.548 18.549 - if ( I_IXOFF(tty) ) 18.550 - xencons_send_xchar(tty, STOP_CHAR(tty)); 18.551 + if (I_IXOFF(tty)) 18.552 + xencons_send_xchar(tty, STOP_CHAR(tty)); 18.553 } 18.554 18.555 static void xencons_unthrottle(struct tty_struct *tty) 18.556 { 18.557 - if ( TTY_INDEX(tty) != 0 ) 18.558 - return; 18.559 + if (TTY_INDEX(tty) != 0) 18.560 + return; 18.561 18.562 - if ( I_IXOFF(tty) ) 18.563 - { 18.564 - if ( x_char != 0 ) 18.565 - x_char = 0; 18.566 - else 18.567 - xencons_send_xchar(tty, START_CHAR(tty)); 18.568 - } 18.569 + if (I_IXOFF(tty)) { 18.570 + if (x_char != 0) 18.571 + x_char = 0; 18.572 + else 18.573 + xencons_send_xchar(tty, START_CHAR(tty)); 18.574 + } 18.575 } 18.576 18.577 static void xencons_flush_buffer(struct tty_struct *tty) 18.578 { 18.579 - unsigned long flags; 18.580 + unsigned long flags; 18.581 18.582 - if ( TTY_INDEX(tty) != 0 ) 18.583 - return; 18.584 + if (TTY_INDEX(tty) != 0) 18.585 + return; 18.586 18.587 - spin_lock_irqsave(&xencons_lock, flags); 18.588 - wc = wp = 0; 18.589 - spin_unlock_irqrestore(&xencons_lock, flags); 18.590 + spin_lock_irqsave(&xencons_lock, flags); 18.591 + wc = wp = 0; 18.592 + spin_unlock_irqrestore(&xencons_lock, flags); 18.593 } 18.594 18.595 static inline int __xencons_put_char(int ch) 18.596 { 18.597 - char _ch = (char)ch; 18.598 - if ( (wp - wc) == wbuf_size ) 18.599 - return 0; 18.600 - wbuf[WBUF_MASK(wp++)] = _ch; 18.601 - return 1; 18.602 + char _ch = (char)ch; 18.603 + if ((wp - wc) == wbuf_size) 18.604 + return 0; 18.605 + wbuf[WBUF_MASK(wp++)] = _ch; 18.606 + return 1; 18.607 } 18.608 18.609 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 18.610 static int xencons_write( 18.611 - struct tty_struct *tty, 18.612 - const unsigned char *buf, 18.613 - int count) 18.614 + struct tty_struct *tty, 18.615 + const unsigned char *buf, 18.616 + int count) 18.617 { 18.618 - int i; 18.619 - 
unsigned long flags; 18.620 + int i; 18.621 + unsigned long flags; 18.622 18.623 - if ( TTY_INDEX(tty) != 0 ) 18.624 - return count; 18.625 + if (TTY_INDEX(tty) != 0) 18.626 + return count; 18.627 18.628 - spin_lock_irqsave(&xencons_lock, flags); 18.629 + spin_lock_irqsave(&xencons_lock, flags); 18.630 18.631 - for ( i = 0; i < count; i++ ) 18.632 - if ( !__xencons_put_char(buf[i]) ) 18.633 - break; 18.634 + for (i = 0; i < count; i++) 18.635 + if (!__xencons_put_char(buf[i])) 18.636 + break; 18.637 18.638 - if ( i != 0 ) 18.639 - __xencons_tx_flush(); 18.640 + if (i != 0) 18.641 + __xencons_tx_flush(); 18.642 18.643 - spin_unlock_irqrestore(&xencons_lock, flags); 18.644 + spin_unlock_irqrestore(&xencons_lock, flags); 18.645 18.646 - return i; 18.647 + return i; 18.648 } 18.649 #else 18.650 static int xencons_write( 18.651 - struct tty_struct *tty, 18.652 - int from_user, 18.653 - const u_char *buf, 18.654 - int count) 18.655 + struct tty_struct *tty, 18.656 + int from_user, 18.657 + const u_char *buf, 18.658 + int count) 18.659 { 18.660 - int i; 18.661 - unsigned long flags; 18.662 + int i; 18.663 + unsigned long flags; 18.664 18.665 - if ( from_user && verify_area(VERIFY_READ, buf, count) ) 18.666 - return -EINVAL; 18.667 + if (from_user && verify_area(VERIFY_READ, buf, count)) 18.668 + return -EINVAL; 18.669 18.670 - if ( TTY_INDEX(tty) != 0 ) 18.671 - return count; 18.672 + if (TTY_INDEX(tty) != 0) 18.673 + return count; 18.674 18.675 - spin_lock_irqsave(&xencons_lock, flags); 18.676 + spin_lock_irqsave(&xencons_lock, flags); 18.677 18.678 - for ( i = 0; i < count; i++ ) 18.679 - { 18.680 - char ch; 18.681 - if ( from_user ) 18.682 - __get_user(ch, buf + i); 18.683 - else 18.684 - ch = buf[i]; 18.685 - if ( !__xencons_put_char(ch) ) 18.686 - break; 18.687 - } 18.688 + for (i = 0; i < count; i++) { 18.689 + char ch; 18.690 + if (from_user) 18.691 + __get_user(ch, buf + i); 18.692 + else 18.693 + ch = buf[i]; 18.694 + if (!__xencons_put_char(ch)) 18.695 + break; 18.696 + } 18.697 18.698 - if ( i != 0 ) 18.699 - __xencons_tx_flush(); 18.700 + if (i != 0) 18.701 + __xencons_tx_flush(); 18.702 18.703 - spin_unlock_irqrestore(&xencons_lock, flags); 18.704 + spin_unlock_irqrestore(&xencons_lock, flags); 18.705 18.706 - return i; 18.707 + return i; 18.708 } 18.709 #endif 18.710 18.711 static void xencons_put_char(struct tty_struct *tty, u_char ch) 18.712 { 18.713 - unsigned long flags; 18.714 + unsigned long flags; 18.715 18.716 - if ( TTY_INDEX(tty) != 0 ) 18.717 - return; 18.718 + if (TTY_INDEX(tty) != 0) 18.719 + return; 18.720 18.721 - spin_lock_irqsave(&xencons_lock, flags); 18.722 - (void)__xencons_put_char(ch); 18.723 - spin_unlock_irqrestore(&xencons_lock, flags); 18.724 + spin_lock_irqsave(&xencons_lock, flags); 18.725 + (void)__xencons_put_char(ch); 18.726 + spin_unlock_irqrestore(&xencons_lock, flags); 18.727 } 18.728 18.729 static void xencons_flush_chars(struct tty_struct *tty) 18.730 { 18.731 - unsigned long flags; 18.732 + unsigned long flags; 18.733 18.734 - if ( TTY_INDEX(tty) != 0 ) 18.735 - return; 18.736 + if (TTY_INDEX(tty) != 0) 18.737 + return; 18.738 18.739 - spin_lock_irqsave(&xencons_lock, flags); 18.740 - __xencons_tx_flush(); 18.741 - spin_unlock_irqrestore(&xencons_lock, flags); 18.742 + spin_lock_irqsave(&xencons_lock, flags); 18.743 + __xencons_tx_flush(); 18.744 + spin_unlock_irqrestore(&xencons_lock, flags); 18.745 } 18.746 18.747 static void xencons_wait_until_sent(struct tty_struct *tty, int timeout) 18.748 { 18.749 - unsigned long orig_jiffies = jiffies; 
18.750 + unsigned long orig_jiffies = jiffies; 18.751 18.752 - if ( TTY_INDEX(tty) != 0 ) 18.753 - return; 18.754 + if (TTY_INDEX(tty) != 0) 18.755 + return; 18.756 18.757 - while ( DRV(tty->driver)->chars_in_buffer(tty) ) 18.758 - { 18.759 - set_current_state(TASK_INTERRUPTIBLE); 18.760 - schedule_timeout(1); 18.761 - if ( signal_pending(current) ) 18.762 - break; 18.763 - if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) ) 18.764 - break; 18.765 - } 18.766 + while (DRV(tty->driver)->chars_in_buffer(tty)) 18.767 + { 18.768 + set_current_state(TASK_INTERRUPTIBLE); 18.769 + schedule_timeout(1); 18.770 + if (signal_pending(current)) 18.771 + break; 18.772 + if ( (timeout != 0) && 18.773 + time_after(jiffies, orig_jiffies + timeout) ) 18.774 + break; 18.775 + } 18.776 18.777 - set_current_state(TASK_RUNNING); 18.778 + set_current_state(TASK_RUNNING); 18.779 } 18.780 18.781 static int xencons_open(struct tty_struct *tty, struct file *filp) 18.782 { 18.783 - unsigned long flags; 18.784 + unsigned long flags; 18.785 18.786 - if ( TTY_INDEX(tty) != 0 ) 18.787 - return 0; 18.788 + if (TTY_INDEX(tty) != 0) 18.789 + return 0; 18.790 18.791 - spin_lock_irqsave(&xencons_lock, flags); 18.792 - tty->driver_data = NULL; 18.793 - if ( xencons_tty == NULL ) 18.794 - xencons_tty = tty; 18.795 - __xencons_tx_flush(); 18.796 - spin_unlock_irqrestore(&xencons_lock, flags); 18.797 + spin_lock_irqsave(&xencons_lock, flags); 18.798 + tty->driver_data = NULL; 18.799 + if (xencons_tty == NULL) 18.800 + xencons_tty = tty; 18.801 + __xencons_tx_flush(); 18.802 + spin_unlock_irqrestore(&xencons_lock, flags); 18.803 18.804 - return 0; 18.805 + return 0; 18.806 } 18.807 18.808 static void xencons_close(struct tty_struct *tty, struct file *filp) 18.809 { 18.810 - unsigned long flags; 18.811 + unsigned long flags; 18.812 18.813 - if ( TTY_INDEX(tty) != 0 ) 18.814 - return; 18.815 + if (TTY_INDEX(tty) != 0) 18.816 + return; 18.817 18.818 - if ( tty->count == 1 ) 18.819 - { 18.820 - tty->closing = 1; 18.821 - tty_wait_until_sent(tty, 0); 18.822 - if ( DRV(tty->driver)->flush_buffer != NULL ) 18.823 - DRV(tty->driver)->flush_buffer(tty); 18.824 - if ( tty->ldisc.flush_buffer != NULL ) 18.825 - tty->ldisc.flush_buffer(tty); 18.826 - tty->closing = 0; 18.827 - spin_lock_irqsave(&xencons_lock, flags); 18.828 - xencons_tty = NULL; 18.829 - spin_unlock_irqrestore(&xencons_lock, flags); 18.830 - } 18.831 + if (tty->count == 1) { 18.832 + tty->closing = 1; 18.833 + tty_wait_until_sent(tty, 0); 18.834 + if (DRV(tty->driver)->flush_buffer != NULL) 18.835 + DRV(tty->driver)->flush_buffer(tty); 18.836 + if (tty->ldisc.flush_buffer != NULL) 18.837 + tty->ldisc.flush_buffer(tty); 18.838 + tty->closing = 0; 18.839 + spin_lock_irqsave(&xencons_lock, flags); 18.840 + xencons_tty = NULL; 18.841 + spin_unlock_irqrestore(&xencons_lock, flags); 18.842 + } 18.843 } 18.844 18.845 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 18.846 static struct tty_operations xencons_ops = { 18.847 - .open = xencons_open, 18.848 - .close = xencons_close, 18.849 - .write = xencons_write, 18.850 - .write_room = xencons_write_room, 18.851 - .put_char = xencons_put_char, 18.852 - .flush_chars = xencons_flush_chars, 18.853 - .chars_in_buffer = xencons_chars_in_buffer, 18.854 - .send_xchar = xencons_send_xchar, 18.855 - .flush_buffer = xencons_flush_buffer, 18.856 - .throttle = xencons_throttle, 18.857 - .unthrottle = xencons_unthrottle, 18.858 - .wait_until_sent = xencons_wait_until_sent, 18.859 + .open = xencons_open, 18.860 + .close = 
xencons_close, 18.861 + .write = xencons_write, 18.862 + .write_room = xencons_write_room, 18.863 + .put_char = xencons_put_char, 18.864 + .flush_chars = xencons_flush_chars, 18.865 + .chars_in_buffer = xencons_chars_in_buffer, 18.866 + .send_xchar = xencons_send_xchar, 18.867 + .flush_buffer = xencons_flush_buffer, 18.868 + .throttle = xencons_throttle, 18.869 + .unthrottle = xencons_unthrottle, 18.870 + .wait_until_sent = xencons_wait_until_sent, 18.871 }; 18.872 18.873 #ifdef CONFIG_XEN_PRIVILEGED_GUEST 18.874 static const char *xennullcon_startup(void) 18.875 { 18.876 - return NULL; 18.877 + return NULL; 18.878 } 18.879 18.880 static int xennullcon_dummy(void) 18.881 { 18.882 - return 0; 18.883 + return 0; 18.884 } 18.885 18.886 #define DUMMY (void *)xennullcon_dummy 18.887 @@ -672,122 +665,128 @@ static int xennullcon_dummy(void) 18.888 */ 18.889 18.890 const struct consw xennull_con = { 18.891 - .owner = THIS_MODULE, 18.892 - .con_startup = xennullcon_startup, 18.893 - .con_init = DUMMY, 18.894 - .con_deinit = DUMMY, 18.895 - .con_clear = DUMMY, 18.896 - .con_putc = DUMMY, 18.897 - .con_putcs = DUMMY, 18.898 - .con_cursor = DUMMY, 18.899 - .con_scroll = DUMMY, 18.900 - .con_bmove = DUMMY, 18.901 - .con_switch = DUMMY, 18.902 - .con_blank = DUMMY, 18.903 - .con_font_set = DUMMY, 18.904 - .con_font_get = DUMMY, 18.905 - .con_font_default = DUMMY, 18.906 - .con_font_copy = DUMMY, 18.907 - .con_set_palette = DUMMY, 18.908 - .con_scrolldelta = DUMMY, 18.909 + .owner = THIS_MODULE, 18.910 + .con_startup = xennullcon_startup, 18.911 + .con_init = DUMMY, 18.912 + .con_deinit = DUMMY, 18.913 + .con_clear = DUMMY, 18.914 + .con_putc = DUMMY, 18.915 + .con_putcs = DUMMY, 18.916 + .con_cursor = DUMMY, 18.917 + .con_scroll = DUMMY, 18.918 + .con_bmove = DUMMY, 18.919 + .con_switch = DUMMY, 18.920 + .con_blank = DUMMY, 18.921 + .con_font_set = DUMMY, 18.922 + .con_font_get = DUMMY, 18.923 + .con_font_default = DUMMY, 18.924 + .con_font_copy = DUMMY, 18.925 + .con_set_palette = DUMMY, 18.926 + .con_scrolldelta = DUMMY, 18.927 }; 18.928 #endif 18.929 #endif 18.930 18.931 static int __init xencons_init(void) 18.932 { 18.933 - int rc; 18.934 + int rc; 18.935 18.936 - if ( xc_mode == XC_OFF ) 18.937 - return 0; 18.938 + if (xc_mode == XC_OFF) 18.939 + return 0; 18.940 18.941 - xencons_ring_init(); 18.942 + xencons_ring_init(); 18.943 18.944 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 18.945 - xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 18.946 - 1 : MAX_NR_CONSOLES); 18.947 - if ( xencons_driver == NULL ) 18.948 - return -ENOMEM; 18.949 + xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 18.950 + 1 : MAX_NR_CONSOLES); 18.951 + if (xencons_driver == NULL) 18.952 + return -ENOMEM; 18.953 #else 18.954 - memset(&xencons_driver, 0, sizeof(struct tty_driver)); 18.955 - xencons_driver.magic = TTY_DRIVER_MAGIC; 18.956 - xencons_driver.refcount = &xencons_refcount; 18.957 - xencons_driver.table = xencons_table; 18.958 - xencons_driver.num = (xc_mode == XC_SERIAL) ? 1 : MAX_NR_CONSOLES; 18.959 + memset(&xencons_driver, 0, sizeof(struct tty_driver)); 18.960 + xencons_driver.magic = TTY_DRIVER_MAGIC; 18.961 + xencons_driver.refcount = &xencons_refcount; 18.962 + xencons_driver.table = xencons_table; 18.963 + xencons_driver.num = 18.964 + (xc_mode == XC_SERIAL) ? 
1 : MAX_NR_CONSOLES; 18.965 #endif 18.966 18.967 - DRV(xencons_driver)->major = TTY_MAJOR; 18.968 - DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL; 18.969 - DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL; 18.970 - DRV(xencons_driver)->init_termios = tty_std_termios; 18.971 - DRV(xencons_driver)->flags = 18.972 - TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS; 18.973 - DRV(xencons_driver)->termios = xencons_termios; 18.974 - DRV(xencons_driver)->termios_locked = xencons_termios_locked; 18.975 + DRV(xencons_driver)->major = TTY_MAJOR; 18.976 + DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL; 18.977 + DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL; 18.978 + DRV(xencons_driver)->init_termios = tty_std_termios; 18.979 + DRV(xencons_driver)->flags = 18.980 + TTY_DRIVER_REAL_RAW | 18.981 + TTY_DRIVER_RESET_TERMIOS | 18.982 + TTY_DRIVER_NO_DEVFS; 18.983 + DRV(xencons_driver)->termios = xencons_termios; 18.984 + DRV(xencons_driver)->termios_locked = xencons_termios_locked; 18.985 18.986 - if ( xc_mode == XC_SERIAL ) 18.987 - { 18.988 - DRV(xencons_driver)->name = "ttyS"; 18.989 - DRV(xencons_driver)->minor_start = 64 + xc_num; 18.990 - DRV(xencons_driver)->name_base = 0 + xc_num; 18.991 - } 18.992 - else 18.993 - { 18.994 - DRV(xencons_driver)->name = "tty"; 18.995 - DRV(xencons_driver)->minor_start = xc_num; 18.996 - DRV(xencons_driver)->name_base = xc_num; 18.997 - } 18.998 + if (xc_mode == XC_SERIAL) 18.999 + { 18.1000 + DRV(xencons_driver)->name = "ttyS"; 18.1001 + DRV(xencons_driver)->minor_start = 64 + xc_num; 18.1002 + DRV(xencons_driver)->name_base = 0 + xc_num; 18.1003 + } else { 18.1004 + DRV(xencons_driver)->name = "tty"; 18.1005 + DRV(xencons_driver)->minor_start = xc_num; 18.1006 + DRV(xencons_driver)->name_base = xc_num; 18.1007 + } 18.1008 18.1009 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 18.1010 - tty_set_operations(xencons_driver, &xencons_ops); 18.1011 + tty_set_operations(xencons_driver, &xencons_ops); 18.1012 #else 18.1013 - xencons_driver.open = xencons_open; 18.1014 - xencons_driver.close = xencons_close; 18.1015 - xencons_driver.write = xencons_write; 18.1016 - xencons_driver.write_room = xencons_write_room; 18.1017 - xencons_driver.put_char = xencons_put_char; 18.1018 - xencons_driver.flush_chars = xencons_flush_chars; 18.1019 - xencons_driver.chars_in_buffer = xencons_chars_in_buffer; 18.1020 - xencons_driver.send_xchar = xencons_send_xchar; 18.1021 - xencons_driver.flush_buffer = xencons_flush_buffer; 18.1022 - xencons_driver.throttle = xencons_throttle; 18.1023 - xencons_driver.unthrottle = xencons_unthrottle; 18.1024 - xencons_driver.wait_until_sent = xencons_wait_until_sent; 18.1025 + xencons_driver.open = xencons_open; 18.1026 + xencons_driver.close = xencons_close; 18.1027 + xencons_driver.write = xencons_write; 18.1028 + xencons_driver.write_room = xencons_write_room; 18.1029 + xencons_driver.put_char = xencons_put_char; 18.1030 + xencons_driver.flush_chars = xencons_flush_chars; 18.1031 + xencons_driver.chars_in_buffer = xencons_chars_in_buffer; 18.1032 + xencons_driver.send_xchar = xencons_send_xchar; 18.1033 + xencons_driver.flush_buffer = xencons_flush_buffer; 18.1034 + xencons_driver.throttle = xencons_throttle; 18.1035 + xencons_driver.unthrottle = xencons_unthrottle; 18.1036 + xencons_driver.wait_until_sent = xencons_wait_until_sent; 18.1037 #endif 18.1038 18.1039 - if ( (rc = tty_register_driver(DRV(xencons_driver))) != 0 ) 18.1040 - { 18.1041 - printk("WARNING: Failed to register Xen virtual " 18.1042 - "console 
driver as '%s%d'\n", 18.1043 - DRV(xencons_driver)->name, DRV(xencons_driver)->name_base); 18.1044 + if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) { 18.1045 + printk("WARNING: Failed to register Xen virtual " 18.1046 + "console driver as '%s%d'\n", 18.1047 + DRV(xencons_driver)->name, DRV(xencons_driver)->name_base); 18.1048 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 18.1049 - put_tty_driver(xencons_driver); 18.1050 - xencons_driver = NULL; 18.1051 + put_tty_driver(xencons_driver); 18.1052 + xencons_driver = NULL; 18.1053 #endif 18.1054 - return rc; 18.1055 - } 18.1056 + return rc; 18.1057 + } 18.1058 18.1059 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 18.1060 - tty_register_device(xencons_driver, 0, NULL); 18.1061 + tty_register_device(xencons_driver, 0, NULL); 18.1062 #endif 18.1063 18.1064 - if ( xen_start_info->flags & SIF_INITDOMAIN ) 18.1065 - { 18.1066 - xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE); 18.1067 - (void)request_irq(xencons_priv_irq, 18.1068 - xencons_priv_interrupt, 0, "console", NULL); 18.1069 - } 18.1070 - else 18.1071 - { 18.1072 - 18.1073 - xencons_ring_register_receiver(xencons_rx); 18.1074 - } 18.1075 + if (xen_start_info->flags & SIF_INITDOMAIN) { 18.1076 + xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE); 18.1077 + (void)request_irq(xencons_priv_irq, 18.1078 + xencons_priv_interrupt, 0, "console", NULL); 18.1079 + } else { 18.1080 + xencons_ring_register_receiver(xencons_rx); 18.1081 + } 18.1082 18.1083 - printk("Xen virtual console successfully installed as %s%d\n", 18.1084 - DRV(xencons_driver)->name, 18.1085 - DRV(xencons_driver)->name_base ); 18.1086 + printk("Xen virtual console successfully installed as %s%d\n", 18.1087 + DRV(xencons_driver)->name, 18.1088 + DRV(xencons_driver)->name_base ); 18.1089 18.1090 - return 0; 18.1091 + return 0; 18.1092 } 18.1093 18.1094 module_init(xencons_init); 18.1095 + 18.1096 +/* 18.1097 + * Local variables: 18.1098 + * c-file-style: "linux" 18.1099 + * indent-tabs-mode: t 18.1100 + * c-indent-level: 8 18.1101 + * c-basic-offset: 8 18.1102 + * tab-width: 8 18.1103 + * End: 18.1104 + */
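A note for readers of the reformatted console code above: transmit data sits in a single ring buffer wbuf indexed by free-running producer/consumer counters wp and wc, so xencons_write_room() and xencons_chars_in_buffer() reduce to counter subtractions and __xencons_put_char() refuses to overwrite unconsumed bytes. The following is a minimal standalone sketch of that arithmetic, not the driver itself; the buffer size is assumed to be a power of two, as the WBUF_MASK macro requires.

#include <stdio.h>

/* Simplified stand-in for the driver's transmit ring (power-of-two size). */
#define WBUF_SIZE	16
#define WBUF_MASK(_i)	((_i) & (WBUF_SIZE - 1))

static char wbuf[WBUF_SIZE];
static unsigned int wc, wp;	/* free-running consumer/producer counters */

/* Mirrors __xencons_put_char(): refuse to overwrite unconsumed data. */
static int put_char(char ch)
{
	if ((wp - wc) == WBUF_SIZE)
		return 0;		/* ring full */
	wbuf[WBUF_MASK(wp++)] = ch;
	return 1;
}

int main(void)
{
	const char *p;

	for (p = "hello"; *p != '\0'; p++)
		put_char(*p);

	/* write_room() and chars_in_buffer() are just counter differences. */
	printf("in buffer: %u, room: %u\n", wp - wc, WBUF_SIZE - (wp - wc));
	return 0;
}

Because the counters are only masked when the array is indexed, the full case (wp - wc == size) and the empty case (wp == wc) stay distinguishable without sacrificing a slot.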
19.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c Thu Sep 22 16:05:44 2005 +0100 19.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c Thu Sep 22 16:12:14 2005 +0100 19.3 @@ -125,3 +125,13 @@ void xencons_resume(void) 19.4 19.5 (void)xencons_ring_init(); 19.6 } 19.7 + 19.8 +/* 19.9 + * Local variables: 19.10 + * c-file-style: "linux" 19.11 + * indent-tabs-mode: t 19.12 + * c-indent-level: 8 19.13 + * c-basic-offset: 8 19.14 + * tab-width: 8 19.15 + * End: 19.16 + */
20.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h Thu Sep 22 16:05:44 2005 +0100 20.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h Thu Sep 22 16:12:14 2005 +0100 20.3 @@ -3,12 +3,21 @@ 20.4 20.5 asmlinkage int xprintk(const char *fmt, ...); 20.6 20.7 - 20.8 int xencons_ring_init(void); 20.9 int xencons_ring_send(const char *data, unsigned len); 20.10 20.11 -typedef void (xencons_receiver_func)(char *buf, unsigned len, 20.12 - struct pt_regs *regs); 20.13 +typedef void (xencons_receiver_func)( 20.14 + char *buf, unsigned len, struct pt_regs *regs); 20.15 void xencons_ring_register_receiver(xencons_receiver_func *f); 20.16 20.17 #endif /* _XENCONS_RING_H */ 20.18 + 20.19 +/* 20.20 + * Local variables: 20.21 + * c-file-style: "linux" 20.22 + * indent-tabs-mode: t 20.23 + * c-indent-level: 8 20.24 + * c-basic-offset: 8 20.25 + * tab-width: 8 20.26 + * End: 20.27 + */
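The reflowed typedef above is essentially the whole receive-side contract of the ring layer: a caller hands xencons_ring_register_receiver() a function of type xencons_receiver_func and is called back with each batch of incoming bytes, which is how console.c wires up xencons_rx in non-privileged domains. The sketch below shows that registration pattern in isolation; it uses stand-in declarations rather than the kernel headers, and register_receiver/demo_rx are hypothetical names — only the typedef shape matches the header.

#include <stdio.h>

/* Stand-in so the sketch compiles outside the kernel tree. */
struct pt_regs;

typedef void (xencons_receiver_func)(
	char *buf, unsigned len, struct pt_regs *regs);

static xencons_receiver_func *xencons_receiver;

/* Plays the role of xencons_ring_register_receiver(): remember the hook. */
static void register_receiver(xencons_receiver_func *f)
{
	xencons_receiver = f;
}

/* Hypothetical receiver: just echo whatever arrived on the ring. */
static void demo_rx(char *buf, unsigned len, struct pt_regs *regs)
{
	(void)regs;
	printf("%.*s", (int)len, buf);
}

int main(void)
{
	char data[] = "console input\n";

	register_receiver(demo_rx);
	if (xencons_receiver != NULL)
		xencons_receiver(data, sizeof(data) - 1, NULL);
	return 0;
}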
21.1 --- a/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c Thu Sep 22 16:05:44 2005 +0100 21.2 +++ b/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c Thu Sep 22 16:12:14 2005 +0100 21.3 @@ -1,9 +1,9 @@ 21.4 /****************************************************************************** 21.5 * evtchn.c 21.6 * 21.7 - * Xenolinux driver for receiving and demuxing event-channel signals. 21.8 + * Driver for receiving and demuxing event-channel signals. 21.9 * 21.10 - * Copyright (c) 2004, K A Fraser 21.11 + * Copyright (c) 2004-2005, K A Fraser 21.12 * Multi-process extensions Copyright (c) 2004, Steven Smith 21.13 * 21.14 * This file may be distributed separately from the Linux kernel, or 21.15 @@ -46,29 +46,18 @@ 21.16 #include <linux/init.h> 21.17 #define XEN_EVTCHN_MASK_OPS 21.18 #include <asm-xen/evtchn.h> 21.19 - 21.20 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) 21.21 -#include <linux/devfs_fs_kernel.h> 21.22 -#define OLD_DEVFS 21.23 -#else 21.24 #include <linux/gfp.h> 21.25 -#endif 21.26 - 21.27 -#ifdef OLD_DEVFS 21.28 -/* NB. This must be shared amongst drivers if more things go in /dev/xen */ 21.29 -static devfs_handle_t xen_dev_dir; 21.30 -#endif 21.31 21.32 struct per_user_data { 21.33 - /* Notification ring, accessed via /dev/xen/evtchn. */ 21.34 -# define EVTCHN_RING_SIZE 2048 /* 2048 16-bit entries */ 21.35 -# define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1)) 21.36 - u16 *ring; 21.37 - unsigned int ring_cons, ring_prod, ring_overflow; 21.38 + /* Notification ring, accessed via /dev/xen/evtchn. */ 21.39 +#define EVTCHN_RING_SIZE 2048 /* 2048 16-bit entries */ 21.40 +#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1)) 21.41 + u16 *ring; 21.42 + unsigned int ring_cons, ring_prod, ring_overflow; 21.43 21.44 - /* Processes wait on this queue when ring is empty. */ 21.45 - wait_queue_head_t evtchn_wait; 21.46 - struct fasync_struct *evtchn_async_queue; 21.47 + /* Processes wait on this queue when ring is empty. */ 21.48 + wait_queue_head_t evtchn_wait; 21.49 + struct fasync_struct *evtchn_async_queue; 21.50 }; 21.51 21.52 /* Who's bound to each port? 
*/ 21.53 @@ -77,356 +66,310 @@ static spinlock_t port_user_lock; 21.54 21.55 void evtchn_device_upcall(int port) 21.56 { 21.57 - struct per_user_data *u; 21.58 + struct per_user_data *u; 21.59 21.60 - spin_lock(&port_user_lock); 21.61 + spin_lock(&port_user_lock); 21.62 21.63 - mask_evtchn(port); 21.64 - clear_evtchn(port); 21.65 + mask_evtchn(port); 21.66 + clear_evtchn(port); 21.67 21.68 - if ( (u = port_user[port]) != NULL ) 21.69 - { 21.70 - if ( (u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE ) 21.71 - { 21.72 - u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port; 21.73 - if ( u->ring_cons == u->ring_prod++ ) 21.74 - { 21.75 - wake_up_interruptible(&u->evtchn_wait); 21.76 - kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN); 21.77 - } 21.78 - } 21.79 - else 21.80 - { 21.81 - u->ring_overflow = 1; 21.82 - } 21.83 - } 21.84 + if ((u = port_user[port]) != NULL) { 21.85 + if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { 21.86 + u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port; 21.87 + if (u->ring_cons == u->ring_prod++) { 21.88 + wake_up_interruptible(&u->evtchn_wait); 21.89 + kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN); 21.90 + } 21.91 + } else { 21.92 + u->ring_overflow = 1; 21.93 + } 21.94 + } 21.95 21.96 - spin_unlock(&port_user_lock); 21.97 + spin_unlock(&port_user_lock); 21.98 } 21.99 21.100 static ssize_t evtchn_read(struct file *file, char *buf, 21.101 size_t count, loff_t *ppos) 21.102 { 21.103 - int rc; 21.104 - unsigned int c, p, bytes1 = 0, bytes2 = 0; 21.105 - DECLARE_WAITQUEUE(wait, current); 21.106 - struct per_user_data *u = file->private_data; 21.107 + int rc; 21.108 + unsigned int c, p, bytes1 = 0, bytes2 = 0; 21.109 + DECLARE_WAITQUEUE(wait, current); 21.110 + struct per_user_data *u = file->private_data; 21.111 21.112 - add_wait_queue(&u->evtchn_wait, &wait); 21.113 - 21.114 - count &= ~1; /* even number of bytes */ 21.115 + add_wait_queue(&u->evtchn_wait, &wait); 21.116 21.117 - if ( count == 0 ) 21.118 - { 21.119 - rc = 0; 21.120 - goto out; 21.121 - } 21.122 + count &= ~1; /* even number of bytes */ 21.123 21.124 - if ( count > PAGE_SIZE ) 21.125 - count = PAGE_SIZE; 21.126 + if (count == 0) { 21.127 + rc = 0; 21.128 + goto out; 21.129 + } 21.130 21.131 - for ( ; ; ) 21.132 - { 21.133 - set_current_state(TASK_INTERRUPTIBLE); 21.134 + if (count > PAGE_SIZE) 21.135 + count = PAGE_SIZE; 21.136 21.137 - if ( (c = u->ring_cons) != (p = u->ring_prod) ) 21.138 - break; 21.139 + for (;;) { 21.140 + set_current_state(TASK_INTERRUPTIBLE); 21.141 + 21.142 + if ((c = u->ring_cons) != (p = u->ring_prod)) 21.143 + break; 21.144 21.145 - if ( u->ring_overflow ) 21.146 - { 21.147 - rc = -EFBIG; 21.148 - goto out; 21.149 - } 21.150 + if (u->ring_overflow) { 21.151 + rc = -EFBIG; 21.152 + goto out; 21.153 + } 21.154 21.155 - if ( file->f_flags & O_NONBLOCK ) 21.156 - { 21.157 - rc = -EAGAIN; 21.158 - goto out; 21.159 - } 21.160 + if (file->f_flags & O_NONBLOCK) { 21.161 + rc = -EAGAIN; 21.162 + goto out; 21.163 + } 21.164 21.165 - if ( signal_pending(current) ) 21.166 - { 21.167 - rc = -ERESTARTSYS; 21.168 - goto out; 21.169 - } 21.170 + if (signal_pending(current)) { 21.171 + rc = -ERESTARTSYS; 21.172 + goto out; 21.173 + } 21.174 21.175 - schedule(); 21.176 - } 21.177 + schedule(); 21.178 + } 21.179 21.180 - /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. 
*/ 21.181 - if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 ) 21.182 - { 21.183 - bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(u16); 21.184 - bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16); 21.185 - } 21.186 - else 21.187 - { 21.188 - bytes1 = (p - c) * sizeof(u16); 21.189 - bytes2 = 0; 21.190 - } 21.191 + /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */ 21.192 + if (((c ^ p) & EVTCHN_RING_SIZE) != 0) { 21.193 + bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * 21.194 + sizeof(u16); 21.195 + bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16); 21.196 + } else { 21.197 + bytes1 = (p - c) * sizeof(u16); 21.198 + bytes2 = 0; 21.199 + } 21.200 21.201 - /* Truncate chunks according to caller's maximum byte count. */ 21.202 - if ( bytes1 > count ) 21.203 - { 21.204 - bytes1 = count; 21.205 - bytes2 = 0; 21.206 - } 21.207 - else if ( (bytes1 + bytes2) > count ) 21.208 - { 21.209 - bytes2 = count - bytes1; 21.210 - } 21.211 + /* Truncate chunks according to caller's maximum byte count. */ 21.212 + if (bytes1 > count) { 21.213 + bytes1 = count; 21.214 + bytes2 = 0; 21.215 + } else if ((bytes1 + bytes2) > count) { 21.216 + bytes2 = count - bytes1; 21.217 + } 21.218 21.219 - if ( copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) || 21.220 - ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) ) 21.221 - { 21.222 - rc = -EFAULT; 21.223 - goto out; 21.224 - } 21.225 + if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) || 21.226 + ((bytes2 != 0) && 21.227 + copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) { 21.228 + rc = -EFAULT; 21.229 + goto out; 21.230 + } 21.231 21.232 - u->ring_cons += (bytes1 + bytes2) / sizeof(u16); 21.233 + u->ring_cons += (bytes1 + bytes2) / sizeof(u16); 21.234 21.235 - rc = bytes1 + bytes2; 21.236 + rc = bytes1 + bytes2; 21.237 21.238 out: 21.239 - __set_current_state(TASK_RUNNING); 21.240 - remove_wait_queue(&u->evtchn_wait, &wait); 21.241 - return rc; 21.242 + __set_current_state(TASK_RUNNING); 21.243 + remove_wait_queue(&u->evtchn_wait, &wait); 21.244 + return rc; 21.245 } 21.246 21.247 static ssize_t evtchn_write(struct file *file, const char *buf, 21.248 size_t count, loff_t *ppos) 21.249 { 21.250 - int rc, i; 21.251 - u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL); 21.252 - struct per_user_data *u = file->private_data; 21.253 + int rc, i; 21.254 + u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL); 21.255 + struct per_user_data *u = file->private_data; 21.256 21.257 - if ( kbuf == NULL ) 21.258 - return -ENOMEM; 21.259 + if (kbuf == NULL) 21.260 + return -ENOMEM; 21.261 21.262 - count &= ~1; /* even number of bytes */ 21.263 + count &= ~1; /* even number of bytes */ 21.264 21.265 - if ( count == 0 ) 21.266 - { 21.267 - rc = 0; 21.268 - goto out; 21.269 - } 21.270 + if (count == 0) { 21.271 + rc = 0; 21.272 + goto out; 21.273 + } 21.274 21.275 - if ( count > PAGE_SIZE ) 21.276 - count = PAGE_SIZE; 21.277 + if (count > PAGE_SIZE) 21.278 + count = PAGE_SIZE; 21.279 + 21.280 + if (copy_from_user(kbuf, buf, count) != 0) { 21.281 + rc = -EFAULT; 21.282 + goto out; 21.283 + } 21.284 21.285 - if ( copy_from_user(kbuf, buf, count) != 0 ) 21.286 - { 21.287 - rc = -EFAULT; 21.288 - goto out; 21.289 - } 21.290 + spin_lock_irq(&port_user_lock); 21.291 + for (i = 0; i < (count/2); i++) 21.292 + if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u)) 21.293 + unmask_evtchn(kbuf[i]); 21.294 + spin_unlock_irq(&port_user_lock); 21.295 21.296 - spin_lock_irq(&port_user_lock); 21.297 - for ( i = 0; i < (count/2); i++ ) 21.298 - if ( 
(kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u) ) 21.299 - unmask_evtchn(kbuf[i]); 21.300 - spin_unlock_irq(&port_user_lock); 21.301 - 21.302 - rc = count; 21.303 + rc = count; 21.304 21.305 out: 21.306 - free_page((unsigned long)kbuf); 21.307 - return rc; 21.308 + free_page((unsigned long)kbuf); 21.309 + return rc; 21.310 } 21.311 21.312 static int evtchn_ioctl(struct inode *inode, struct file *file, 21.313 unsigned int cmd, unsigned long arg) 21.314 { 21.315 - int rc = 0; 21.316 - struct per_user_data *u = file->private_data; 21.317 + int rc = 0; 21.318 + struct per_user_data *u = file->private_data; 21.319 21.320 - spin_lock_irq(&port_user_lock); 21.321 + spin_lock_irq(&port_user_lock); 21.322 21.323 - switch ( cmd ) 21.324 - { 21.325 - case EVTCHN_RESET: 21.326 - /* Initialise the ring to empty. Clear errors. */ 21.327 - u->ring_cons = u->ring_prod = u->ring_overflow = 0; 21.328 - break; 21.329 + switch (cmd) { 21.330 + case EVTCHN_RESET: 21.331 + /* Initialise the ring to empty. Clear errors. */ 21.332 + u->ring_cons = u->ring_prod = u->ring_overflow = 0; 21.333 + break; 21.334 21.335 - case EVTCHN_BIND: 21.336 - if ( arg >= NR_EVENT_CHANNELS ) 21.337 - { 21.338 - rc = -EINVAL; 21.339 - } 21.340 - else if ( port_user[arg] != NULL ) 21.341 - { 21.342 - rc = -EISCONN; 21.343 - } 21.344 - else 21.345 - { 21.346 - port_user[arg] = u; 21.347 - unmask_evtchn(arg); 21.348 - } 21.349 - break; 21.350 + case EVTCHN_BIND: 21.351 + if (arg >= NR_EVENT_CHANNELS) { 21.352 + rc = -EINVAL; 21.353 + } else if (port_user[arg] != NULL) { 21.354 + rc = -EISCONN; 21.355 + } else { 21.356 + port_user[arg] = u; 21.357 + unmask_evtchn(arg); 21.358 + } 21.359 + break; 21.360 21.361 - case EVTCHN_UNBIND: 21.362 - if ( arg >= NR_EVENT_CHANNELS ) 21.363 - { 21.364 - rc = -EINVAL; 21.365 - } 21.366 - else if ( port_user[arg] != u ) 21.367 - { 21.368 - rc = -ENOTCONN; 21.369 - } 21.370 - else 21.371 - { 21.372 - port_user[arg] = NULL; 21.373 - mask_evtchn(arg); 21.374 - } 21.375 - break; 21.376 + case EVTCHN_UNBIND: 21.377 + if (arg >= NR_EVENT_CHANNELS) { 21.378 + rc = -EINVAL; 21.379 + } else if (port_user[arg] != u) { 21.380 + rc = -ENOTCONN; 21.381 + } else { 21.382 + port_user[arg] = NULL; 21.383 + mask_evtchn(arg); 21.384 + } 21.385 + break; 21.386 21.387 - default: 21.388 - rc = -ENOSYS; 21.389 - break; 21.390 - } 21.391 + default: 21.392 + rc = -ENOSYS; 21.393 + break; 21.394 + } 21.395 21.396 - spin_unlock_irq(&port_user_lock); 21.397 + spin_unlock_irq(&port_user_lock); 21.398 21.399 - return rc; 21.400 + return rc; 21.401 } 21.402 21.403 static unsigned int evtchn_poll(struct file *file, poll_table *wait) 21.404 { 21.405 - unsigned int mask = POLLOUT | POLLWRNORM; 21.406 - struct per_user_data *u = file->private_data; 21.407 + unsigned int mask = POLLOUT | POLLWRNORM; 21.408 + struct per_user_data *u = file->private_data; 21.409 21.410 - poll_wait(file, &u->evtchn_wait, wait); 21.411 - if ( u->ring_cons != u->ring_prod ) 21.412 - mask |= POLLIN | POLLRDNORM; 21.413 - if ( u->ring_overflow ) 21.414 - mask = POLLERR; 21.415 - return mask; 21.416 + poll_wait(file, &u->evtchn_wait, wait); 21.417 + if (u->ring_cons != u->ring_prod) 21.418 + mask |= POLLIN | POLLRDNORM; 21.419 + if (u->ring_overflow) 21.420 + mask = POLLERR; 21.421 + return mask; 21.422 } 21.423 21.424 static int evtchn_fasync(int fd, struct file *filp, int on) 21.425 { 21.426 - struct per_user_data *u = filp->private_data; 21.427 - return fasync_helper(fd, filp, on, &u->evtchn_async_queue); 21.428 + struct per_user_data *u = 
filp->private_data; 21.429 + return fasync_helper(fd, filp, on, &u->evtchn_async_queue); 21.430 } 21.431 21.432 static int evtchn_open(struct inode *inode, struct file *filp) 21.433 { 21.434 - struct per_user_data *u; 21.435 + struct per_user_data *u; 21.436 21.437 - if ( (u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL ) 21.438 - return -ENOMEM; 21.439 + if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL) 21.440 + return -ENOMEM; 21.441 21.442 - memset(u, 0, sizeof(*u)); 21.443 - init_waitqueue_head(&u->evtchn_wait); 21.444 + memset(u, 0, sizeof(*u)); 21.445 + init_waitqueue_head(&u->evtchn_wait); 21.446 21.447 - if ( (u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL ) 21.448 - { 21.449 - kfree(u); 21.450 - return -ENOMEM; 21.451 - } 21.452 + if ((u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL) 21.453 + { 21.454 + kfree(u); 21.455 + return -ENOMEM; 21.456 + } 21.457 21.458 - filp->private_data = u; 21.459 + filp->private_data = u; 21.460 21.461 - return 0; 21.462 + return 0; 21.463 } 21.464 21.465 static int evtchn_release(struct inode *inode, struct file *filp) 21.466 { 21.467 - int i; 21.468 - struct per_user_data *u = filp->private_data; 21.469 + int i; 21.470 + struct per_user_data *u = filp->private_data; 21.471 21.472 - spin_lock_irq(&port_user_lock); 21.473 + spin_lock_irq(&port_user_lock); 21.474 21.475 - free_page((unsigned long)u->ring); 21.476 + free_page((unsigned long)u->ring); 21.477 21.478 - for ( i = 0; i < NR_EVENT_CHANNELS; i++ ) 21.479 - { 21.480 - if ( port_user[i] == u ) 21.481 - { 21.482 - port_user[i] = NULL; 21.483 - mask_evtchn(i); 21.484 - } 21.485 - } 21.486 + for (i = 0; i < NR_EVENT_CHANNELS; i++) 21.487 + { 21.488 + if (port_user[i] == u) 21.489 + { 21.490 + port_user[i] = NULL; 21.491 + mask_evtchn(i); 21.492 + } 21.493 + } 21.494 21.495 - spin_unlock_irq(&port_user_lock); 21.496 + spin_unlock_irq(&port_user_lock); 21.497 21.498 - kfree(u); 21.499 + kfree(u); 21.500 21.501 - return 0; 21.502 + return 0; 21.503 } 21.504 21.505 static struct file_operations evtchn_fops = { 21.506 - .owner = THIS_MODULE, 21.507 - .read = evtchn_read, 21.508 - .write = evtchn_write, 21.509 - .ioctl = evtchn_ioctl, 21.510 - .poll = evtchn_poll, 21.511 - .fasync = evtchn_fasync, 21.512 - .open = evtchn_open, 21.513 - .release = evtchn_release, 21.514 + .owner = THIS_MODULE, 21.515 + .read = evtchn_read, 21.516 + .write = evtchn_write, 21.517 + .ioctl = evtchn_ioctl, 21.518 + .poll = evtchn_poll, 21.519 + .fasync = evtchn_fasync, 21.520 + .open = evtchn_open, 21.521 + .release = evtchn_release, 21.522 }; 21.523 21.524 static struct miscdevice evtchn_miscdev = { 21.525 - .minor = EVTCHN_MINOR, 21.526 - .name = "evtchn", 21.527 - .fops = &evtchn_fops, 21.528 + .minor = EVTCHN_MINOR, 21.529 + .name = "evtchn", 21.530 + .fops = &evtchn_fops, 21.531 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 21.532 - .devfs_name = "misc/evtchn", 21.533 + .devfs_name = "misc/evtchn", 21.534 #endif 21.535 }; 21.536 21.537 static int __init evtchn_init(void) 21.538 { 21.539 -#ifdef OLD_DEVFS 21.540 - devfs_handle_t symlink_handle; 21.541 - int pos; 21.542 - char link_dest[64]; 21.543 -#endif 21.544 - int err; 21.545 - 21.546 - spin_lock_init(&port_user_lock); 21.547 - memset(port_user, 0, sizeof(port_user)); 21.548 + int err; 21.549 21.550 - /* (DEVFS) create '/dev/misc/evtchn'. 
*/ 21.551 - err = misc_register(&evtchn_miscdev); 21.552 - if ( err != 0 ) 21.553 - { 21.554 - printk(KERN_ALERT "Could not register /dev/misc/evtchn\n"); 21.555 - return err; 21.556 - } 21.557 - 21.558 -#ifdef OLD_DEVFS 21.559 - /* (DEVFS) create directory '/dev/xen'. */ 21.560 - xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL); 21.561 + spin_lock_init(&port_user_lock); 21.562 + memset(port_user, 0, sizeof(port_user)); 21.563 21.564 - /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */ 21.565 - pos = devfs_generate_path(evtchn_miscdev.devfs_handle, 21.566 - &link_dest[3], 21.567 - sizeof(link_dest) - 3); 21.568 - if ( pos >= 0 ) 21.569 - strncpy(&link_dest[pos], "../", 3); 21.570 + /* (DEVFS) create '/dev/misc/evtchn'. */ 21.571 + err = misc_register(&evtchn_miscdev); 21.572 + if (err != 0) 21.573 + { 21.574 + printk(KERN_ALERT "Could not register /dev/misc/evtchn\n"); 21.575 + return err; 21.576 + } 21.577 21.578 - /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */ 21.579 - (void)devfs_mk_symlink(xen_dev_dir, 21.580 - "evtchn", 21.581 - DEVFS_FL_DEFAULT, 21.582 - &link_dest[pos], 21.583 - &symlink_handle, 21.584 - NULL); 21.585 + printk("Event-channel device installed.\n"); 21.586 21.587 - /* (DEVFS) automatically destroy the symlink with its destination. */ 21.588 - devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle); 21.589 -#endif 21.590 - 21.591 - printk("Event-channel device installed.\n"); 21.592 - 21.593 - return 0; 21.594 + return 0; 21.595 } 21.596 21.597 static void evtchn_cleanup(void) 21.598 { 21.599 - misc_deregister(&evtchn_miscdev); 21.600 + misc_deregister(&evtchn_miscdev); 21.601 } 21.602 21.603 module_init(evtchn_init); 21.604 module_exit(evtchn_cleanup); 21.605 + 21.606 +/* 21.607 + * Local variables: 21.608 + * c-file-style: "linux" 21.609 + * indent-tabs-mode: t 21.610 + * c-indent-level: 8 21.611 + * c-basic-offset: 8 21.612 + * tab-width: 8 21.613 + * End: 21.614 + */
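The reindented evtchn_read() above drains pending port numbers from a power-of-two ring in at most two chunks, split where the ring wraps; the (c ^ p) & EVTCHN_RING_SIZE test detects whether the consumer and producer counters lie on opposite sides of a wrap boundary. Below is a standalone sketch of just that chunk computation, with copy_to_user() reduced to memcpy and the locking, blocking and byte-count truncation omitted.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Same ring geometry as the driver: 2048 16-bit port numbers. */
#define EVTCHN_RING_SIZE	2048
#define EVTCHN_RING_MASK(_i)	((_i) & (EVTCHN_RING_SIZE - 1))

static uint16_t ring[EVTCHN_RING_SIZE];

/*
 * Copy the entries between free-running indices c (cons) and p (prod) into
 * buf, in at most two chunks split at the ring wrap -- the same computation
 * evtchn_read() performs before copy_to_user().
 */
static size_t read_ring(unsigned int c, unsigned int p, uint16_t *buf)
{
	size_t bytes1, bytes2;

	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		/* Region wraps: tail of the ring first, then its head. */
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(uint16_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(uint16_t);
	} else {
		bytes1 = (p - c) * sizeof(uint16_t);
		bytes2 = 0;
	}

	memcpy(buf, &ring[EVTCHN_RING_MASK(c)], bytes1);
	memcpy((char *)buf + bytes1, &ring[0], bytes2);
	return (bytes1 + bytes2) / sizeof(uint16_t);
}

int main(void)
{
	uint16_t out[8];
	unsigned int i, cons = EVTCHN_RING_SIZE - 2, prod = cons + 4;

	for (i = cons; i != prod; i++)
		ring[EVTCHN_RING_MASK(i)] = (uint16_t)i;	/* fake ports */

	printf("copied %zu entries\n", read_ring(cons, prod, out));
	return 0;
}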
22.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h Thu Sep 22 16:05:44 2005 +0100 22.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h Thu Sep 22 16:12:14 2005 +0100 22.3 @@ -62,9 +62,7 @@ typedef struct netif_st { 22.4 /* Private indexes into shared ring. */ 22.5 NETIF_RING_IDX rx_req_cons; 22.6 NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */ 22.7 -#ifdef CONFIG_XEN_NETDEV_GRANT 22.8 NETIF_RING_IDX rx_resp_prod_copy; 22.9 -#endif 22.10 NETIF_RING_IDX tx_req_cons; 22.11 NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */ 22.12
23.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Thu Sep 22 16:05:44 2005 +0100 23.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Thu Sep 22 16:12:14 2005 +0100 23.3 @@ -23,7 +23,7 @@ static void make_tx_response(netif_t *ne 23.4 static int make_rx_response(netif_t *netif, 23.5 u16 id, 23.6 s8 st, 23.7 - unsigned long addr, 23.8 + u16 offset, 23.9 u16 size, 23.10 u16 csum_valid); 23.11 23.12 @@ -41,11 +41,7 @@ static struct sk_buff_head rx_queue; 23.13 static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2+1]; 23.14 static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE]; 23.15 23.16 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.17 static gnttab_transfer_t grant_rx_op[MAX_PENDING_REQS]; 23.18 -#else 23.19 -static struct mmuext_op rx_mmuext[NETIF_RX_RING_SIZE]; 23.20 -#endif 23.21 static unsigned char rx_notify[NR_EVENT_CHANNELS]; 23.22 23.23 /* Don't currently gate addition of an interface to the tx scheduling list. */ 23.24 @@ -72,15 +68,10 @@ static PEND_RING_IDX dealloc_prod, deall 23.25 23.26 static struct sk_buff_head tx_queue; 23.27 23.28 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.29 static u16 grant_tx_ref[MAX_PENDING_REQS]; 23.30 static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS]; 23.31 static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS]; 23.32 23.33 -#else 23.34 -static multicall_entry_t tx_mcl[MAX_PENDING_REQS]; 23.35 -#endif 23.36 - 23.37 static struct list_head net_schedule_list; 23.38 static spinlock_t net_schedule_list_lock; 23.39 23.40 @@ -108,7 +99,7 @@ static unsigned long alloc_mfn(void) 23.41 return mfn; 23.42 } 23.43 23.44 -#ifndef CONFIG_XEN_NETDEV_GRANT 23.45 +#if 0 23.46 static void free_mfn(unsigned long mfn) 23.47 { 23.48 unsigned long flags; 23.49 @@ -180,18 +171,7 @@ int netif_be_start_xmit(struct sk_buff * 23.50 dev_kfree_skb(skb); 23.51 skb = nskb; 23.52 } 23.53 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.54 -#ifdef DEBUG_GRANT 23.55 - printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d " 23.56 - "id=%04x gr=%04x\n", 23.57 - netif->rx->req_prod, 23.58 - netif->rx_req_cons, 23.59 - netif->rx->ring[ 23.60 - MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id, 23.61 - netif->rx->ring[ 23.62 - MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref); 23.63 -#endif 23.64 -#endif 23.65 + 23.66 netif->rx_req_cons++; 23.67 netif_get(netif); 23.68 23.69 @@ -232,11 +212,7 @@ static void net_rx_action(unsigned long 23.70 u16 size, id, evtchn; 23.71 multicall_entry_t *mcl; 23.72 mmu_update_t *mmu; 23.73 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.74 gnttab_transfer_t *gop; 23.75 -#else 23.76 - struct mmuext_op *mmuext; 23.77 -#endif 23.78 unsigned long vdata, old_mfn, new_mfn; 23.79 struct sk_buff_head rxq; 23.80 struct sk_buff *skb; 23.81 @@ -247,11 +223,7 @@ static void net_rx_action(unsigned long 23.82 23.83 mcl = rx_mcl; 23.84 mmu = rx_mmu; 23.85 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.86 gop = grant_rx_op; 23.87 -#else 23.88 - mmuext = rx_mmuext; 23.89 -#endif 23.90 23.91 while ((skb = skb_dequeue(&rx_queue)) != NULL) { 23.92 netif = netdev_priv(skb->dev); 23.93 @@ -277,25 +249,13 @@ static void net_rx_action(unsigned long 23.94 pfn_pte_ma(new_mfn, PAGE_KERNEL), 0); 23.95 mcl++; 23.96 23.97 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.98 gop->mfn = old_mfn; 23.99 gop->domid = netif->domid; 23.100 gop->ref = netif->rx->ring[ 23.101 MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref; 23.102 netif->rx_resp_prod_copy++; 23.103 gop++; 23.104 -#else 23.105 - mcl->op = __HYPERVISOR_mmuext_op; 23.106 - mcl->args[0] = (unsigned long)mmuext; 23.107 - mcl->args[1] = 1; 23.108 - mcl->args[2] = 0; 23.109 
- mcl->args[3] = netif->domid; 23.110 - mcl++; 23.111 23.112 - mmuext->cmd = MMUEXT_REASSIGN_PAGE; 23.113 - mmuext->arg1.mfn = old_mfn; 23.114 - mmuext++; 23.115 -#endif 23.116 mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) | 23.117 MMU_MACHPHYS_UPDATE; 23.118 mmu->val = __pa(vdata) >> PAGE_SHIFT; 23.119 @@ -303,9 +263,6 @@ static void net_rx_action(unsigned long 23.120 23.121 __skb_queue_tail(&rxq, skb); 23.122 23.123 -#ifdef DEBUG_GRANT 23.124 - dump_packet('a', old_mfn, vdata); 23.125 -#endif 23.126 /* Filled the batch queue? */ 23.127 if ((mcl - rx_mcl) == ARRAY_SIZE(rx_mcl)) 23.128 break; 23.129 @@ -321,17 +278,12 @@ static void net_rx_action(unsigned long 23.130 mcl->args[3] = DOMID_SELF; 23.131 mcl++; 23.132 23.133 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.134 mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 23.135 -#else 23.136 - mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 23.137 -#endif 23.138 BUG_ON(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0); 23.139 23.140 mcl = rx_mcl; 23.141 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.142 - if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 23.143 - gop - grant_rx_op)) { 23.144 + if( HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 23.145 + gop - grant_rx_op)) { 23.146 /* 23.147 * The other side has given us a bad grant ref, or has no 23.148 * headroom, or has gone away. Unfortunately the current grant 23.149 @@ -343,20 +295,14 @@ static void net_rx_action(unsigned long 23.150 grant_rx_op[0].domid, gop - grant_rx_op); 23.151 } 23.152 gop = grant_rx_op; 23.153 -#else 23.154 - mmuext = rx_mmuext; 23.155 -#endif 23.156 + 23.157 while ((skb = __skb_dequeue(&rxq)) != NULL) { 23.158 netif = netdev_priv(skb->dev); 23.159 size = skb->tail - skb->data; 23.160 23.161 /* Rederive the machine addresses. */ 23.162 new_mfn = mcl[0].args[1] >> PAGE_SHIFT; 23.163 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.164 old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */ 23.165 -#else 23.166 - old_mfn = mmuext[0].arg1.mfn; 23.167 -#endif 23.168 atomic_set(&(skb_shinfo(skb)->dataref), 1); 23.169 skb_shinfo(skb)->nr_frags = 0; 23.170 skb_shinfo(skb)->frag_list = NULL; 23.171 @@ -369,27 +315,17 @@ static void net_rx_action(unsigned long 23.172 23.173 /* Check the reassignment error code. 
*/ 23.174 status = NETIF_RSP_OKAY; 23.175 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.176 if(gop->status != 0) { 23.177 DPRINTK("Bad status %d from grant transfer to DOM%u\n", 23.178 gop->status, netif->domid); 23.179 /* XXX SMH: should free 'old_mfn' here */ 23.180 status = NETIF_RSP_ERROR; 23.181 } 23.182 -#else 23.183 - if (unlikely(mcl[1].result != 0)) { 23.184 - DPRINTK("Failed MMU update transferring to DOM%u\n", 23.185 - netif->domid); 23.186 - free_mfn(old_mfn); 23.187 - status = NETIF_RSP_ERROR; 23.188 - } 23.189 -#endif 23.190 evtchn = netif->evtchn; 23.191 id = netif->rx->ring[ 23.192 MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id; 23.193 if (make_rx_response(netif, id, status, 23.194 - (old_mfn << PAGE_SHIFT) | /* XXX */ 23.195 - ((unsigned long)skb->data & ~PAGE_MASK), 23.196 + (unsigned long)skb->data & ~PAGE_MASK, 23.197 size, skb->proto_csum_valid) && 23.198 (rx_notify[evtchn] == 0)) { 23.199 rx_notify[evtchn] = 1; 23.200 @@ -398,13 +334,8 @@ static void net_rx_action(unsigned long 23.201 23.202 netif_put(netif); 23.203 dev_kfree_skb(skb); 23.204 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.205 mcl++; 23.206 gop++; 23.207 -#else 23.208 - mcl += 2; 23.209 - mmuext += 1; 23.210 -#endif 23.211 } 23.212 23.213 while (notify_nr != 0) { 23.214 @@ -486,11 +417,7 @@ static void tx_credit_callback(unsigned 23.215 23.216 inline static void net_tx_action_dealloc(void) 23.217 { 23.218 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.219 gnttab_unmap_grant_ref_t *gop; 23.220 -#else 23.221 - multicall_entry_t *mcl; 23.222 -#endif 23.223 u16 pending_idx; 23.224 PEND_RING_IDX dc, dp; 23.225 netif_t *netif; 23.226 @@ -498,7 +425,6 @@ inline static void net_tx_action_dealloc 23.227 dc = dealloc_cons; 23.228 dp = dealloc_prod; 23.229 23.230 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.231 /* 23.232 * Free up any grants we have finished using 23.233 */ 23.234 @@ -513,26 +439,8 @@ inline static void net_tx_action_dealloc 23.235 } 23.236 BUG_ON(HYPERVISOR_grant_table_op( 23.237 GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops)); 23.238 -#else 23.239 - mcl = tx_mcl; 23.240 - while (dc != dp) { 23.241 - pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)]; 23.242 - MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx), 23.243 - __pte(0), 0); 23.244 - mcl++; 23.245 - } 23.246 23.247 - mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 23.248 - BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0); 23.249 - 23.250 - mcl = tx_mcl; 23.251 -#endif 23.252 while (dealloc_cons != dp) { 23.253 -#ifndef CONFIG_XEN_NETDEV_GRANT 23.254 - /* The update_va_mapping() must not fail. 
*/ 23.255 - BUG_ON(mcl[0].result != 0); 23.256 -#endif 23.257 - 23.258 pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)]; 23.259 23.260 netif = pending_tx_info[pending_idx].netif; 23.261 @@ -556,10 +464,6 @@ inline static void net_tx_action_dealloc 23.262 add_to_net_schedule_list_tail(netif); 23.263 23.264 netif_put(netif); 23.265 - 23.266 -#ifndef CONFIG_XEN_NETDEV_GRANT 23.267 - mcl++; 23.268 -#endif 23.269 } 23.270 } 23.271 23.272 @@ -572,21 +476,13 @@ static void net_tx_action(unsigned long 23.273 netif_tx_request_t txreq; 23.274 u16 pending_idx; 23.275 NETIF_RING_IDX i; 23.276 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.277 gnttab_map_grant_ref_t *mop; 23.278 -#else 23.279 - multicall_entry_t *mcl; 23.280 -#endif 23.281 unsigned int data_len; 23.282 23.283 if (dealloc_cons != dealloc_prod) 23.284 net_tx_action_dealloc(); 23.285 23.286 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.287 mop = tx_map_ops; 23.288 -#else 23.289 - mcl = tx_mcl; 23.290 -#endif 23.291 while ((NR_PENDING_REQS < MAX_PENDING_REQS) && 23.292 !list_empty(&net_schedule_list)) { 23.293 /* Get a netif from the list with work to do. */ 23.294 @@ -657,8 +553,7 @@ static void net_tx_action(unsigned long 23.295 } 23.296 23.297 /* No crossing a page as the payload mustn't fragment. */ 23.298 - if (unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= 23.299 - PAGE_SIZE)) { 23.300 + if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) { 23.301 DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 23.302 txreq.addr, txreq.size, 23.303 (txreq.addr &~PAGE_MASK) + txreq.size); 23.304 @@ -682,20 +577,12 @@ static void net_tx_action(unsigned long 23.305 23.306 /* Packets passed to netif_rx() must have some headroom. */ 23.307 skb_reserve(skb, 16); 23.308 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.309 + 23.310 mop->host_addr = MMAP_VADDR(pending_idx); 23.311 mop->dom = netif->domid; 23.312 - mop->ref = txreq.addr >> PAGE_SHIFT; 23.313 + mop->ref = txreq.gref; 23.314 mop->flags = GNTMAP_host_map | GNTMAP_readonly; 23.315 mop++; 23.316 -#else 23.317 - MULTI_update_va_mapping_otherdomain( 23.318 - mcl, MMAP_VADDR(pending_idx), 23.319 - pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL), 23.320 - 0, netif->domid); 23.321 - 23.322 - mcl++; 23.323 -#endif 23.324 23.325 memcpy(&pending_tx_info[pending_idx].req, 23.326 &txreq, sizeof(txreq)); 23.327 @@ -706,17 +593,10 @@ static void net_tx_action(unsigned long 23.328 23.329 pending_cons++; 23.330 23.331 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.332 if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops)) 23.333 break; 23.334 -#else 23.335 - /* Filled the batch queue? */ 23.336 - if ((mcl - tx_mcl) == ARRAY_SIZE(tx_mcl)) 23.337 - break; 23.338 -#endif 23.339 } 23.340 23.341 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.342 if (mop == tx_map_ops) 23.343 return; 23.344 23.345 @@ -724,14 +604,6 @@ static void net_tx_action(unsigned long 23.346 GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops)); 23.347 23.348 mop = tx_map_ops; 23.349 -#else 23.350 - if (mcl == tx_mcl) 23.351 - return; 23.352 - 23.353 - BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0); 23.354 - 23.355 - mcl = tx_mcl; 23.356 -#endif 23.357 while ((skb = __skb_dequeue(&tx_queue)) != NULL) { 23.358 pending_idx = *((u16 *)skb->data); 23.359 netif = pending_tx_info[pending_idx].netif; 23.360 @@ -739,7 +611,6 @@ static void net_tx_action(unsigned long 23.361 sizeof(txreq)); 23.362 23.363 /* Check the remap error code. 
*/ 23.364 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.365 if (unlikely(mop->handle < 0)) { 23.366 printk(KERN_ALERT "#### netback grant fails\n"); 23.367 make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 23.368 @@ -754,30 +625,13 @@ static void net_tx_action(unsigned long 23.369 __pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] = 23.370 FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT); 23.371 grant_tx_ref[pending_idx] = mop->handle; 23.372 -#else 23.373 - if (unlikely(mcl[0].result != 0)) { 23.374 - DPRINTK("Bad page frame\n"); 23.375 - make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 23.376 - netif_put(netif); 23.377 - kfree_skb(skb); 23.378 - mcl++; 23.379 - pending_ring[MASK_PEND_IDX(pending_prod++)] = 23.380 - pending_idx; 23.381 - continue; 23.382 - } 23.383 - 23.384 - phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> 23.385 - PAGE_SHIFT] = 23.386 - FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT); 23.387 -#endif 23.388 23.389 data_len = (txreq.size > PKT_PROT_LEN) ? 23.390 PKT_PROT_LEN : txreq.size; 23.391 23.392 __skb_put(skb, data_len); 23.393 memcpy(skb->data, 23.394 - (void *)(MMAP_VADDR(pending_idx)| 23.395 - (txreq.addr&~PAGE_MASK)), 23.396 + (void *)(MMAP_VADDR(pending_idx)|txreq.offset), 23.397 data_len); 23.398 if (data_len < txreq.size) { 23.399 /* Append the packet payload as a fragment. */ 23.400 @@ -786,7 +640,7 @@ static void net_tx_action(unsigned long 23.401 skb_shinfo(skb)->frags[0].size = 23.402 txreq.size - data_len; 23.403 skb_shinfo(skb)->frags[0].page_offset = 23.404 - (txreq.addr + data_len) & ~PAGE_MASK; 23.405 + txreq.offset + data_len; 23.406 skb_shinfo(skb)->nr_frags = 1; 23.407 } else { 23.408 /* Schedule a response immediately. */ 23.409 @@ -813,11 +667,7 @@ static void net_tx_action(unsigned long 23.410 netif_rx(skb); 23.411 netif->dev->last_rx = jiffies; 23.412 23.413 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.414 mop++; 23.415 -#else 23.416 - mcl++; 23.417 -#endif 23.418 } 23.419 } 23.420 23.421 @@ -874,7 +724,7 @@ static void make_tx_response(netif_t *ne 23.422 static int make_rx_response(netif_t *netif, 23.423 u16 id, 23.424 s8 st, 23.425 - unsigned long addr, 23.426 + u16 offset, 23.427 u16 size, 23.428 u16 csum_valid) 23.429 { 23.430 @@ -882,7 +732,7 @@ static int make_rx_response(netif_t *net 23.431 netif_rx_response_t *resp; 23.432 23.433 resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp; 23.434 - resp->addr = addr; 23.435 + resp->offset = offset; 23.436 resp->csum_valid = csum_valid; 23.437 resp->id = id; 23.438 resp->status = (s16)size; 23.439 @@ -937,9 +787,6 @@ static int __init netback_init(void) 23.440 return 0; 23.441 23.442 IPRINTK("Initialising Xen netif backend.\n"); 23.443 -#ifdef CONFIG_XEN_NETDEV_GRANT 23.444 - IPRINTK("Using grant tables.\n"); 23.445 -#endif 23.446 23.447 /* We can increase reservation by this much in net_rx_action(). */ 23.448 balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
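With the CONFIG_XEN_NETDEV_GRANT conditionals removed, the receive path above always hands pages to the frontend via GNTTABOP_transfer: for each packet the backend records the machine frame it is giving away, the frontend's domain id, and the grant reference the frontend posted in its rx request, then submits the whole batch in one hypercall. The sketch below shows only that per-packet fill loop; the gnttab_transfer_t layout and the rx_ring type are simplified stand-ins for illustration, not the real interface headers.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for the Xen grant-transfer operation descriptor. */
typedef struct {
	unsigned long	mfn;	/* machine frame being transferred away     */
	uint16_t	domid;	/* domain that will receive the frame       */
	uint32_t	ref;	/* grant reference supplied by the frontend */
	int16_t		status;	/* filled in by the hypervisor              */
} gnttab_transfer_t;

#define RX_RING_SIZE	256
#define MASK_RX_IDX(i)	((i) & (RX_RING_SIZE - 1))

struct rx_ring {
	uint32_t gref[RX_RING_SIZE];	/* grant refs posted by the frontend */
};

/*
 * Build one transfer op per packet, consuming frontend-posted grant refs in
 * order -- the same bookkeeping net_rx_action() does with rx_resp_prod_copy
 * before calling HYPERVISOR_grant_table_op(GNTTABOP_transfer, ...).
 */
static size_t build_transfers(struct rx_ring *ring, unsigned int *resp_prod,
			      const unsigned long *old_mfns, size_t n,
			      uint16_t frontend_domid, gnttab_transfer_t *gop)
{
	size_t i;

	for (i = 0; i < n; i++) {
		gop[i].mfn   = old_mfns[i];
		gop[i].domid = frontend_domid;
		gop[i].ref   = ring->gref[MASK_RX_IDX((*resp_prod)++)];
	}
	return i;	/* number of ops to submit in one hypercall */
}

int main(void)
{
	struct rx_ring ring;
	gnttab_transfer_t ops[2];
	unsigned long mfns[2] = { 0x1234, 0x1235 };	/* made-up frames */
	unsigned int i, resp_prod = 0;
	size_t n;

	for (i = 0; i < RX_RING_SIZE; i++)
		ring.gref[i] = 100 + i;			/* made-up grant refs */

	n = build_transfers(&ring, &resp_prod, mfns, 2, 7, ops);
	printf("built %zu ops, first ref %u\n", n, ops[0].ref);
	return 0;
}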
24.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Thu Sep 22 16:05:44 2005 +0100 24.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Thu Sep 22 16:12:14 2005 +0100 24.3 @@ -256,8 +256,8 @@ static void network_tx_buf_gc(struct net 24.4 for (i = np->tx_resp_cons; i != prod; i++) { 24.5 id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id; 24.6 skb = np->tx_skbs[id]; 24.7 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.8 - if (unlikely(gnttab_query_foreign_access(np->grant_tx_ref[id]) != 0)) { 24.9 + if (unlikely(gnttab_query_foreign_access( 24.10 + np->grant_tx_ref[id]) != 0)) { 24.11 printk(KERN_ALERT "network_tx_buf_gc: warning " 24.12 "-- grant still in use by backend " 24.13 "domain.\n"); 24.14 @@ -268,7 +268,6 @@ static void network_tx_buf_gc(struct net 24.15 gnttab_release_grant_reference( 24.16 &np->gref_tx_head, np->grant_tx_ref[id]); 24.17 np->grant_tx_ref[id] = GRANT_INVALID_REF; 24.18 -#endif 24.19 ADD_ID_TO_FREELIST(np->tx_skbs, id); 24.20 dev_kfree_skb_irq(skb); 24.21 } 24.22 @@ -287,10 +286,7 @@ static void network_tx_buf_gc(struct net 24.23 mb(); 24.24 } while (prod != np->tx->resp_prod); 24.25 24.26 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.27 out: 24.28 -#endif 24.29 - 24.30 if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) { 24.31 np->tx_full = 0; 24.32 if (np->user_state == UST_OPEN) 24.33 @@ -307,9 +303,7 @@ static void network_alloc_rx_buffers(str 24.34 int i, batch_target; 24.35 NETIF_RING_IDX req_prod = np->rx->req_prod; 24.36 struct xen_memory_reservation reservation; 24.37 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.38 grant_ref_t ref; 24.39 -#endif 24.40 24.41 if (unlikely(np->backend_state != BEST_CONNECTED)) 24.42 return; 24.43 @@ -343,13 +337,11 @@ static void network_alloc_rx_buffers(str 24.44 np->rx_skbs[id] = skb; 24.45 24.46 np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id; 24.47 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.48 ref = gnttab_claim_grant_reference(&np->gref_rx_head); 24.49 BUG_ON((signed short)ref < 0); 24.50 np->grant_rx_ref[id] = ref; 24.51 gnttab_grant_foreign_transfer_ref(ref, np->backend_id); 24.52 np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref; 24.53 -#endif 24.54 rx_pfn_array[i] = virt_to_mfn(skb->head); 24.55 24.56 /* Remove this page from map before passing back to Xen. 
*/ 24.57 @@ -400,10 +392,8 @@ static int network_start_xmit(struct sk_ 24.58 struct net_private *np = netdev_priv(dev); 24.59 netif_tx_request_t *tx; 24.60 NETIF_RING_IDX i; 24.61 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.62 grant_ref_t ref; 24.63 unsigned long mfn; 24.64 -#endif 24.65 24.66 if (unlikely(np->tx_full)) { 24.67 printk(KERN_ALERT "%s: full queue wasn't stopped!\n", 24.68 @@ -439,18 +429,13 @@ static int network_start_xmit(struct sk_ 24.69 tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req; 24.70 24.71 tx->id = id; 24.72 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.73 ref = gnttab_claim_grant_reference(&np->gref_tx_head); 24.74 BUG_ON((signed short)ref < 0); 24.75 mfn = virt_to_mfn(skb->data); 24.76 gnttab_grant_foreign_access_ref( 24.77 ref, np->backend_id, mfn, GNTMAP_readonly); 24.78 - tx->addr = ref << PAGE_SHIFT; 24.79 - np->grant_tx_ref[id] = ref; 24.80 -#else 24.81 - tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT; 24.82 -#endif 24.83 - tx->addr |= (unsigned long)skb->data & ~PAGE_MASK; 24.84 + tx->gref = np->grant_tx_ref[id] = ref; 24.85 + tx->offset = (unsigned long)skb->data & ~PAGE_MASK; 24.86 tx->size = skb->len; 24.87 tx->csum_blank = (skb->ip_summed == CHECKSUM_HW); 24.88 24.89 @@ -511,10 +496,8 @@ static int netif_poll(struct net_device 24.90 int work_done, budget, more_to_do = 1; 24.91 struct sk_buff_head rxq; 24.92 unsigned long flags; 24.93 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.94 unsigned long mfn; 24.95 grant_ref_t ref; 24.96 -#endif 24.97 24.98 spin_lock(&np->rx_lock); 24.99 24.100 @@ -550,7 +533,6 @@ static int netif_poll(struct net_device 24.101 continue; 24.102 } 24.103 24.104 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.105 ref = np->grant_rx_ref[rx->id]; 24.106 24.107 if(ref == GRANT_INVALID_REF) { 24.108 @@ -568,17 +550,12 @@ static int netif_poll(struct net_device 24.109 np->grant_rx_ref[rx->id] = GRANT_INVALID_REF; 24.110 mfn = gnttab_end_foreign_transfer_ref(ref); 24.111 gnttab_release_grant_reference(&np->gref_rx_head, ref); 24.112 -#endif 24.113 24.114 skb = np->rx_skbs[rx->id]; 24.115 ADD_ID_TO_FREELIST(np->rx_skbs, rx->id); 24.116 24.117 /* NB. We handle skb overflow later. */ 24.118 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.119 - skb->data = skb->head + rx->addr; 24.120 -#else 24.121 - skb->data = skb->head + (rx->addr & ~PAGE_MASK); 24.122 -#endif 24.123 + skb->data = skb->head + rx->offset; 24.124 skb->len = rx->status; 24.125 skb->tail = skb->data + skb->len; 24.126 24.127 @@ -589,30 +566,14 @@ static int netif_poll(struct net_device 24.128 np->stats.rx_bytes += rx->status; 24.129 24.130 /* Remap the page. 
*/ 24.131 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.132 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; 24.133 -#else 24.134 - mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE; 24.135 -#endif 24.136 mmu->val = __pa(skb->head) >> PAGE_SHIFT; 24.137 mmu++; 24.138 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.139 MULTI_update_va_mapping(mcl, (unsigned long)skb->head, 24.140 pfn_pte_ma(mfn, PAGE_KERNEL), 0); 24.141 -#else 24.142 - MULTI_update_va_mapping(mcl, (unsigned long)skb->head, 24.143 - pfn_pte_ma(rx->addr >> PAGE_SHIFT, 24.144 - PAGE_KERNEL), 0); 24.145 -#endif 24.146 mcl++; 24.147 24.148 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.149 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn; 24.150 -#else 24.151 - phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 24.152 - rx->addr >> PAGE_SHIFT; 24.153 -#endif 24.154 - 24.155 24.156 __skb_queue_tail(&rxq, skb); 24.157 } 24.158 @@ -773,16 +734,12 @@ static void network_connect(struct net_d 24.159 tx = &np->tx->ring[requeue_idx++].req; 24.160 24.161 tx->id = i; 24.162 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.163 gnttab_grant_foreign_access_ref( 24.164 np->grant_tx_ref[i], np->backend_id, 24.165 virt_to_mfn(np->tx_skbs[i]->data), 24.166 GNTMAP_readonly); 24.167 - tx->addr = np->grant_tx_ref[i] << PAGE_SHIFT; 24.168 -#else 24.169 - tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT; 24.170 -#endif 24.171 - tx->addr |= (unsigned long)skb->data & ~PAGE_MASK; 24.172 + tx->gref = np->grant_tx_ref[i]; 24.173 + tx->offset = (unsigned long)skb->data & ~PAGE_MASK; 24.174 tx->size = skb->len; 24.175 24.176 np->stats.tx_bytes += skb->len; 24.177 @@ -795,12 +752,10 @@ static void network_connect(struct net_d 24.178 /* Rebuild the RX buffer freelist and the RX ring itself. */ 24.179 for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 24.180 if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) { 24.181 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.182 gnttab_grant_foreign_transfer_ref( 24.183 np->grant_rx_ref[i], np->backend_id); 24.184 np->rx->ring[requeue_idx].req.gref = 24.185 np->grant_rx_ref[i]; 24.186 -#endif 24.187 np->rx->ring[requeue_idx].req.id = i; 24.188 requeue_idx++; 24.189 } 24.190 @@ -862,11 +817,9 @@ connect_device(struct net_private *np, u 24.191 24.192 static void netif_uninit(struct net_device *dev) 24.193 { 24.194 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.195 struct net_private *np = netdev_priv(dev); 24.196 gnttab_free_grant_references(np->gref_tx_head); 24.197 gnttab_free_grant_references(np->gref_rx_head); 24.198 -#endif 24.199 } 24.200 24.201 static struct ethtool_ops network_ethtool_ops = 24.202 @@ -911,19 +864,14 @@ static int create_netdev(int handle, str 24.203 /* Initialise {tx,rx}_skbs as a free chain containing every entry. 
*/ 24.204 for (i = 0; i <= NETIF_TX_RING_SIZE; i++) { 24.205 np->tx_skbs[i] = (void *)((unsigned long) i+1); 24.206 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.207 np->grant_tx_ref[i] = GRANT_INVALID_REF; 24.208 -#endif 24.209 } 24.210 24.211 for (i = 0; i <= NETIF_RX_RING_SIZE; i++) { 24.212 np->rx_skbs[i] = (void *)((unsigned long) i+1); 24.213 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.214 np->grant_rx_ref[i] = GRANT_INVALID_REF; 24.215 -#endif 24.216 } 24.217 24.218 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.219 /* A grant for every tx ring slot */ 24.220 if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE, 24.221 &np->gref_tx_head) < 0) { 24.222 @@ -937,7 +885,6 @@ static int create_netdev(int handle, str 24.223 gnttab_free_grant_references(np->gref_tx_head); 24.224 goto exit; 24.225 } 24.226 -#endif 24.227 24.228 netdev->open = network_open; 24.229 netdev->hard_start_xmit = network_start_xmit; 24.230 @@ -971,10 +918,8 @@ static int create_netdev(int handle, str 24.231 return err; 24.232 24.233 exit_free_grefs: 24.234 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.235 gnttab_free_grant_references(np->gref_tx_head); 24.236 gnttab_free_grant_references(np->gref_rx_head); 24.237 -#endif 24.238 goto exit; 24.239 } 24.240 24.241 @@ -1024,10 +969,8 @@ static int setup_device(struct xenbus_de 24.242 evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound }; 24.243 int err; 24.244 24.245 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.246 info->tx_ring_ref = GRANT_INVALID_REF; 24.247 info->rx_ring_ref = GRANT_INVALID_REF; 24.248 -#endif 24.249 24.250 info->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL); 24.251 if (info->tx == 0) { 24.252 @@ -1045,7 +988,6 @@ static int setup_device(struct xenbus_de 24.253 memset(info->rx, 0, PAGE_SIZE); 24.254 info->backend_state = BEST_DISCONNECTED; 24.255 24.256 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.257 err = gnttab_grant_foreign_access(info->backend_id, 24.258 virt_to_mfn(info->tx), 0); 24.259 if (err < 0) { 24.260 @@ -1062,11 +1004,6 @@ static int setup_device(struct xenbus_de 24.261 } 24.262 info->rx_ring_ref = err; 24.263 24.264 -#else 24.265 - info->tx_ring_ref = virt_to_mfn(info->tx); 24.266 - info->rx_ring_ref = virt_to_mfn(info->rx); 24.267 -#endif 24.268 - 24.269 op.u.alloc_unbound.dom = info->backend_id; 24.270 err = HYPERVISOR_event_channel_op(&op); 24.271 if (err) { 24.272 @@ -1084,7 +1021,6 @@ static int setup_device(struct xenbus_de 24.273 free_page((unsigned long)info->rx); 24.274 info->rx = 0; 24.275 24.276 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.277 if (info->tx_ring_ref != GRANT_INVALID_REF) 24.278 gnttab_end_foreign_access(info->tx_ring_ref, 0); 24.279 info->tx_ring_ref = GRANT_INVALID_REF; 24.280 @@ -1092,7 +1028,6 @@ static int setup_device(struct xenbus_de 24.281 if (info->rx_ring_ref != GRANT_INVALID_REF) 24.282 gnttab_end_foreign_access(info->rx_ring_ref, 0); 24.283 info->rx_ring_ref = GRANT_INVALID_REF; 24.284 -#endif 24.285 24.286 return err; 24.287 } 24.288 @@ -1106,7 +1041,6 @@ static void netif_free(struct netfront_i 24.289 free_page((unsigned long)info->rx); 24.290 info->rx = 0; 24.291 24.292 -#ifdef CONFIG_XEN_NETDEV_GRANT 24.293 if (info->tx_ring_ref != GRANT_INVALID_REF) 24.294 gnttab_end_foreign_access(info->tx_ring_ref, 0); 24.295 info->tx_ring_ref = GRANT_INVALID_REF; 24.296 @@ -1114,7 +1048,6 @@ static void netif_free(struct netfront_i 24.297 if (info->rx_ring_ref != GRANT_INVALID_REF) 24.298 gnttab_end_foreign_access(info->rx_ring_ref, 0); 24.299 info->rx_ring_ref = GRANT_INVALID_REF; 24.300 -#endif 24.301 24.302 unbind_evtchn_from_irqhandler(info->evtchn, info->netdev); 
24.303 info->evtchn = 0;
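
The netfront hunks above complete the switch to grant-table-only data exchange: the old CONFIG_XEN_NETDEV_GRANT conditionals are dropped and a transmit request now carries a grant reference plus an in-page offset instead of a raw machine address. Below is a minimal sketch, not part of the changeset, of how one tx slot is populated under the new scheme; the helper name queue_tx_slot and the request type name are illustrative assumptions, while the calls and field names are exactly the ones used in the hunk above.

static void queue_tx_slot(struct net_private *np, struct sk_buff *skb,
                          int i, netif_tx_request_t *tx)
{
        /* Let the backend map the data page read-only via the grant entry. */
        gnttab_grant_foreign_access_ref(np->grant_tx_ref[i], np->backend_id,
                                        virt_to_mfn(skb->data), GNTMAP_readonly);

        tx->id     = i;
        tx->gref   = np->grant_tx_ref[i];                   /* grant reference, not an address */
        tx->offset = (unsigned long)skb->data & ~PAGE_MASK; /* offset of the data within its page */
        tx->size   = skb->len;                              /* payload length */
}

The same pattern appears twice in the hunk: once on the normal transmit path and once in network_connect() when outstanding requests are re-queued after reconnecting to the backend.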
25.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Thu Sep 22 16:05:44 2005 +0100 25.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Thu Sep 22 16:12:14 2005 +0100 25.3 @@ -41,232 +41,253 @@ static struct proc_dir_entry *privcmd_in 25.4 static int privcmd_ioctl(struct inode *inode, struct file *file, 25.5 unsigned int cmd, unsigned long data) 25.6 { 25.7 - int ret = -ENOSYS; 25.8 + int ret = -ENOSYS; 25.9 25.10 - switch ( cmd ) 25.11 - { 25.12 - case IOCTL_PRIVCMD_HYPERCALL: 25.13 - { 25.14 - privcmd_hypercall_t hypercall; 25.15 + switch (cmd) { 25.16 + case IOCTL_PRIVCMD_HYPERCALL: { 25.17 + privcmd_hypercall_t hypercall; 25.18 25.19 - if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) ) 25.20 - return -EFAULT; 25.21 + if (copy_from_user(&hypercall, (void *)data, 25.22 + sizeof(hypercall))) 25.23 + return -EFAULT; 25.24 25.25 #if defined(__i386__) 25.26 - __asm__ __volatile__ ( 25.27 - "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; " 25.28 - "movl 4(%%eax),%%ebx ;" 25.29 - "movl 8(%%eax),%%ecx ;" 25.30 - "movl 12(%%eax),%%edx ;" 25.31 - "movl 16(%%eax),%%esi ;" 25.32 - "movl 20(%%eax),%%edi ;" 25.33 - "movl (%%eax),%%eax ;" 25.34 - TRAP_INSTR "; " 25.35 - "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx" 25.36 - : "=a" (ret) : "0" (&hypercall) : "memory" ); 25.37 + __asm__ __volatile__ ( 25.38 + "pushl %%ebx; pushl %%ecx; pushl %%edx; " 25.39 + "pushl %%esi; pushl %%edi; " 25.40 + "movl 4(%%eax),%%ebx ;" 25.41 + "movl 8(%%eax),%%ecx ;" 25.42 + "movl 12(%%eax),%%edx ;" 25.43 + "movl 16(%%eax),%%esi ;" 25.44 + "movl 20(%%eax),%%edi ;" 25.45 + "movl (%%eax),%%eax ;" 25.46 + TRAP_INSTR "; " 25.47 + "popl %%edi; popl %%esi; popl %%edx; " 25.48 + "popl %%ecx; popl %%ebx" 25.49 + : "=a" (ret) : "0" (&hypercall) : "memory" ); 25.50 #elif defined (__x86_64__) 25.51 - { 25.52 - long ign1, ign2, ign3; 25.53 - __asm__ __volatile__ ( 25.54 - "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR 25.55 - : "=a" (ret), "=D" (ign1), "=S" (ign2), "=d" (ign3) 25.56 - : "0" ((unsigned long)hypercall.op), 25.57 - "1" ((unsigned long)hypercall.arg[0]), 25.58 - "2" ((unsigned long)hypercall.arg[1]), 25.59 - "3" ((unsigned long)hypercall.arg[2]), 25.60 - "g" ((unsigned long)hypercall.arg[3]), 25.61 - "g" ((unsigned long)hypercall.arg[4]) 25.62 - : "r11","rcx","r8","r10","memory"); 25.63 - } 25.64 + { 25.65 + long ign1, ign2, ign3; 25.66 + __asm__ __volatile__ ( 25.67 + "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR 25.68 + : "=a" (ret), "=D" (ign1), 25.69 + "=S" (ign2), "=d" (ign3) 25.70 + : "0" ((unsigned long)hypercall.op), 25.71 + "1" ((unsigned long)hypercall.arg[0]), 25.72 + "2" ((unsigned long)hypercall.arg[1]), 25.73 + "3" ((unsigned long)hypercall.arg[2]), 25.74 + "g" ((unsigned long)hypercall.arg[3]), 25.75 + "g" ((unsigned long)hypercall.arg[4]) 25.76 + : "r11","rcx","r8","r10","memory"); 25.77 + } 25.78 #elif defined (__ia64__) 25.79 - __asm__ __volatile__ ( 25.80 - ";; mov r14=%2; mov r15=%3; mov r16=%4; mov r17=%5; mov r18=%6;" 25.81 - "mov r2=%1; break 0x1000;; mov %0=r8 ;;" 25.82 - : "=r" (ret) 25.83 - : "r" (hypercall.op), 25.84 - "r" (hypercall.arg[0]), 25.85 - "r" (hypercall.arg[1]), 25.86 - "r" (hypercall.arg[2]), 25.87 - "r" (hypercall.arg[3]), 25.88 - "r" (hypercall.arg[4]) 25.89 - : "r14","r15","r16","r17","r18","r2","r8","memory"); 25.90 + __asm__ __volatile__ ( 25.91 + ";; mov r14=%2; mov r15=%3; " 25.92 + "mov r16=%4; mov r17=%5; mov r18=%6;" 25.93 + "mov r2=%1; break 0x1000;; mov %0=r8 ;;" 25.94 + : "=r" (ret) 25.95 + : "r" 
(hypercall.op), 25.96 + "r" (hypercall.arg[0]), 25.97 + "r" (hypercall.arg[1]), 25.98 + "r" (hypercall.arg[2]), 25.99 + "r" (hypercall.arg[3]), 25.100 + "r" (hypercall.arg[4]) 25.101 + : "r14","r15","r16","r17","r18","r2","r8","memory"); 25.102 #endif 25.103 - } 25.104 - break; 25.105 + } 25.106 + break; 25.107 25.108 #if defined(CONFIG_XEN_PRIVILEGED_GUEST) 25.109 - case IOCTL_PRIVCMD_MMAP: 25.110 - { 25.111 + case IOCTL_PRIVCMD_MMAP: { 25.112 #define PRIVCMD_MMAP_SZ 32 25.113 - privcmd_mmap_t mmapcmd; 25.114 - privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p; 25.115 - int i, rc; 25.116 + privcmd_mmap_t mmapcmd; 25.117 + privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p; 25.118 + int i, rc; 25.119 25.120 - if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) ) 25.121 - return -EFAULT; 25.122 + if (copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd))) 25.123 + return -EFAULT; 25.124 25.125 - p = mmapcmd.entry; 25.126 + p = mmapcmd.entry; 25.127 25.128 - for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ) 25.129 - { 25.130 - int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)? 25.131 - PRIVCMD_MMAP_SZ:(mmapcmd.num-i); 25.132 + for (i = 0; i < mmapcmd.num; 25.133 + i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) { 25.134 + int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)? 25.135 + PRIVCMD_MMAP_SZ:(mmapcmd.num-i); 25.136 25.137 - 25.138 - if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) ) 25.139 - return -EFAULT; 25.140 + if (copy_from_user(&msg, p, 25.141 + n*sizeof(privcmd_mmap_entry_t))) 25.142 + return -EFAULT; 25.143 25.144 - for ( j = 0; j < n; j++ ) 25.145 - { 25.146 - struct vm_area_struct *vma = 25.147 - find_vma( current->mm, msg[j].va ); 25.148 + for (j = 0; j < n; j++) { 25.149 + struct vm_area_struct *vma = 25.150 + find_vma( current->mm, msg[j].va ); 25.151 + 25.152 + if (!vma) 25.153 + return -EINVAL; 25.154 25.155 - if ( !vma ) 25.156 - return -EINVAL; 25.157 + if (msg[j].va > PAGE_OFFSET) 25.158 + return -EINVAL; 25.159 25.160 - if ( msg[j].va > PAGE_OFFSET ) 25.161 - return -EINVAL; 25.162 + if ((msg[j].va + (msg[j].npages << PAGE_SHIFT)) 25.163 + > vma->vm_end ) 25.164 + return -EINVAL; 25.165 25.166 - if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end ) 25.167 - return -EINVAL; 25.168 - 25.169 - if ( (rc = direct_remap_pfn_range(vma, 25.170 - msg[j].va&PAGE_MASK, 25.171 - msg[j].mfn, 25.172 - msg[j].npages<<PAGE_SHIFT, 25.173 - vma->vm_page_prot, 25.174 - mmapcmd.dom)) < 0 ) 25.175 - return rc; 25.176 - } 25.177 - } 25.178 - ret = 0; 25.179 - } 25.180 - break; 25.181 + if ((rc = direct_remap_pfn_range( 25.182 + vma, 25.183 + msg[j].va&PAGE_MASK, 25.184 + msg[j].mfn, 25.185 + msg[j].npages<<PAGE_SHIFT, 25.186 + vma->vm_page_prot, 25.187 + mmapcmd.dom)) < 0) 25.188 + return rc; 25.189 + } 25.190 + } 25.191 + ret = 0; 25.192 + } 25.193 + break; 25.194 25.195 - case IOCTL_PRIVCMD_MMAPBATCH: 25.196 - { 25.197 - mmu_update_t u; 25.198 - privcmd_mmapbatch_t m; 25.199 - struct vm_area_struct *vma = NULL; 25.200 - unsigned long *p, addr; 25.201 - unsigned long mfn, ptep; 25.202 - int i; 25.203 + case IOCTL_PRIVCMD_MMAPBATCH: { 25.204 + mmu_update_t u; 25.205 + privcmd_mmapbatch_t m; 25.206 + struct vm_area_struct *vma = NULL; 25.207 + unsigned long *p, addr; 25.208 + unsigned long mfn, ptep; 25.209 + int i; 25.210 + 25.211 + if (copy_from_user(&m, (void *)data, sizeof(m))) { 25.212 + ret = -EFAULT; 25.213 + goto batch_err; 25.214 + } 25.215 25.216 - if ( copy_from_user(&m, (void *)data, sizeof(m)) ) 25.217 - { ret = -EFAULT; goto batch_err; } 25.218 - 25.219 
- vma = find_vma( current->mm, m.addr ); 25.220 + vma = find_vma( current->mm, m.addr ); 25.221 + if (!vma) { 25.222 + ret = -EINVAL; 25.223 + goto batch_err; 25.224 + } 25.225 25.226 - if ( !vma ) 25.227 - { ret = -EINVAL; goto batch_err; } 25.228 + if (m.addr > PAGE_OFFSET) { 25.229 + ret = -EFAULT; 25.230 + goto batch_err; 25.231 + } 25.232 25.233 - if ( m.addr > PAGE_OFFSET ) 25.234 - { ret = -EFAULT; goto batch_err; } 25.235 - 25.236 - if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end ) 25.237 - { ret = -EFAULT; goto batch_err; } 25.238 + if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) { 25.239 + ret = -EFAULT; 25.240 + goto batch_err; 25.241 + } 25.242 25.243 - p = m.arr; 25.244 - addr = m.addr; 25.245 - for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ ) 25.246 - { 25.247 - if ( get_user(mfn, p) ) 25.248 - return -EFAULT; 25.249 + p = m.arr; 25.250 + addr = m.addr; 25.251 + for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) { 25.252 + if (get_user(mfn, p)) 25.253 + return -EFAULT; 25.254 25.255 - ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep); 25.256 - if (ret) 25.257 - goto batch_err; 25.258 + ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep); 25.259 + if (ret) 25.260 + goto batch_err; 25.261 25.262 - u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot)); 25.263 - u.ptr = ptep; 25.264 + u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot)); 25.265 + u.ptr = ptep; 25.266 25.267 - if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) ) 25.268 - put_user(0xF0000000 | mfn, p); 25.269 - } 25.270 + if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) 25.271 + put_user(0xF0000000 | mfn, p); 25.272 + } 25.273 25.274 - ret = 0; 25.275 - break; 25.276 + ret = 0; 25.277 + break; 25.278 25.279 - batch_err: 25.280 - printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n", 25.281 - ret, vma, m.addr, m.num, m.arr, 25.282 - vma ? vma->vm_start : 0, vma ? vma->vm_end : 0); 25.283 - break; 25.284 - } 25.285 - break; 25.286 + batch_err: 25.287 + printk("batch_err ret=%d vma=%p addr=%lx " 25.288 + "num=%d arr=%p %lx-%lx\n", 25.289 + ret, vma, m.addr, m.num, m.arr, 25.290 + vma ? vma->vm_start : 0, vma ? vma->vm_end : 0); 25.291 + break; 25.292 + } 25.293 + break; 25.294 #endif 25.295 25.296 - case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN: 25.297 - { 25.298 - unsigned long m2pv = (unsigned long)machine_to_phys_mapping; 25.299 - pgd_t *pgd = pgd_offset_k(m2pv); 25.300 - pud_t *pud = pud_offset(pgd, m2pv); 25.301 - pmd_t *pmd = pmd_offset(pud, m2pv); 25.302 - unsigned long m2p_start_mfn = (*(unsigned long *)pmd) >> PAGE_SHIFT; 25.303 - ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0; 25.304 - } 25.305 - break; 25.306 + case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN: { 25.307 + unsigned long m2pv = (unsigned long)machine_to_phys_mapping; 25.308 + pgd_t *pgd = pgd_offset_k(m2pv); 25.309 + pud_t *pud = pud_offset(pgd, m2pv); 25.310 + pmd_t *pmd = pmd_offset(pud, m2pv); 25.311 + unsigned long m2p_start_mfn = 25.312 + (*(unsigned long *)pmd) >> PAGE_SHIFT; 25.313 + ret = put_user(m2p_start_mfn, (unsigned long *)data) ? 
25.314 + -EFAULT: 0; 25.315 + } 25.316 + break; 25.317 25.318 - case IOCTL_PRIVCMD_INITDOMAIN_STORE: 25.319 - { 25.320 - extern int do_xenbus_probe(void*); 25.321 - unsigned long page; 25.322 + case IOCTL_PRIVCMD_INITDOMAIN_STORE: { 25.323 + extern int do_xenbus_probe(void*); 25.324 + unsigned long page; 25.325 25.326 - if (xen_start_info->store_evtchn != 0) { 25.327 - ret = xen_start_info->store_mfn; 25.328 - break; 25.329 - } 25.330 + if (xen_start_info->store_evtchn != 0) { 25.331 + ret = xen_start_info->store_mfn; 25.332 + break; 25.333 + } 25.334 25.335 - /* Allocate page. */ 25.336 - page = get_zeroed_page(GFP_KERNEL); 25.337 - if (!page) { 25.338 - ret = -ENOMEM; 25.339 - break; 25.340 - } 25.341 + /* Allocate page. */ 25.342 + page = get_zeroed_page(GFP_KERNEL); 25.343 + if (!page) { 25.344 + ret = -ENOMEM; 25.345 + break; 25.346 + } 25.347 25.348 - /* We don't refcnt properly, so set reserved on page. 25.349 - * (this allocation is permanent) */ 25.350 - SetPageReserved(virt_to_page(page)); 25.351 + /* We don't refcnt properly, so set reserved on page. 25.352 + * (this allocation is permanent) */ 25.353 + SetPageReserved(virt_to_page(page)); 25.354 25.355 - /* Initial connect. Setup channel and page. */ 25.356 - xen_start_info->store_evtchn = data; 25.357 - xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> 25.358 - PAGE_SHIFT); 25.359 - ret = xen_start_info->store_mfn; 25.360 + /* Initial connect. Setup channel and page. */ 25.361 + xen_start_info->store_evtchn = data; 25.362 + xen_start_info->store_mfn = 25.363 + pfn_to_mfn(virt_to_phys((void *)page) >> 25.364 + PAGE_SHIFT); 25.365 + ret = xen_start_info->store_mfn; 25.366 25.367 - /* We'll return then this will wait for daemon to answer */ 25.368 - kthread_run(do_xenbus_probe, NULL, "xenbus_probe"); 25.369 - } 25.370 - break; 25.371 + /* We'll return then this will wait for daemon to answer */ 25.372 + kthread_run(do_xenbus_probe, NULL, "xenbus_probe"); 25.373 + } 25.374 + break; 25.375 25.376 - default: 25.377 - ret = -EINVAL; 25.378 - break; 25.379 - } 25.380 - return ret; 25.381 + default: 25.382 + ret = -EINVAL; 25.383 + break; 25.384 + } 25.385 + 25.386 + return ret; 25.387 } 25.388 25.389 static int privcmd_mmap(struct file * file, struct vm_area_struct * vma) 25.390 { 25.391 - /* DONTCOPY is essential for Xen as copy_page_range is broken. */ 25.392 - vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY; 25.393 + /* DONTCOPY is essential for Xen as copy_page_range is broken. */ 25.394 + vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY; 25.395 25.396 - return 0; 25.397 + return 0; 25.398 } 25.399 25.400 static struct file_operations privcmd_file_ops = { 25.401 - .ioctl = privcmd_ioctl, 25.402 - .mmap = privcmd_mmap, 25.403 + .ioctl = privcmd_ioctl, 25.404 + .mmap = privcmd_mmap, 25.405 }; 25.406 25.407 25.408 static int __init privcmd_init(void) 25.409 { 25.410 - privcmd_intf = create_xen_proc_entry("privcmd", 0400); 25.411 - if ( privcmd_intf != NULL ) 25.412 - privcmd_intf->proc_fops = &privcmd_file_ops; 25.413 + privcmd_intf = create_xen_proc_entry("privcmd", 0400); 25.414 + if (privcmd_intf != NULL) 25.415 + privcmd_intf->proc_fops = &privcmd_file_ops; 25.416 25.417 - return 0; 25.418 + return 0; 25.419 } 25.420 25.421 __initcall(privcmd_init); 25.422 + 25.423 +/* 25.424 + * Local variables: 25.425 + * c-file-style: "linux" 25.426 + * indent-tabs-mode: t 25.427 + * c-indent-level: 8 25.428 + * c-basic-offset: 8 25.429 + * tab-width: 8 25.430 + * End: 25.431 + */
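
The privcmd.c hunk is almost entirely a reindentation to Linux kernel style, but it also shows the full hypercall trampoline: privcmd_ioctl() copies a privcmd_hypercall_t from userspace and loads op and arg[0..4] into the architecture's hypercall registers before executing TRAP_INSTR. A hedged userspace sketch of driving that path from a privileged tool follows; the include path, the /proc/xen/privcmd node name (created by privcmd_init() above) and the numeric hypercall value are assumptions, while the struct layout is the one the inline assembly consumes.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm-xen/linux-public/privcmd.h>  /* assumed home of privcmd_hypercall_t and IOCTL_PRIVCMD_HYPERCALL */

#define HYPERVISOR_xen_version_nr 17       /* __HYPERVISOR_xen_version; assumed value */

int query_xen_version(void)
{
        privcmd_hypercall_t call = {
                .op  = HYPERVISOR_xen_version_nr,
                .arg = { 0 /* XENVER_version */, 0, 0, 0, 0 },
        };
        int fd, ret;

        fd = open("/proc/xen/privcmd", O_RDWR);
        if (fd < 0)
                return -1;

        /* The ioctl copies 'call' into the kernel, loads op/arg[] into the
         * per-architecture registers shown above and executes TRAP_INSTR. */
        ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
        close(fd);
        return ret;     /* hypervisor return value, or -1 with errno set */
}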
26.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h Thu Sep 22 16:05:44 2005 +0100 26.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h Thu Sep 22 16:12:14 2005 +0100 26.3 @@ -84,3 +84,13 @@ extern int num_frontends; 26.4 #define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE)) 26.5 26.6 #endif /* __TPMIF__BACKEND__COMMON_H__ */ 26.7 + 26.8 +/* 26.9 + * Local variables: 26.10 + * c-file-style: "linux" 26.11 + * indent-tabs-mode: t 26.12 + * c-indent-level: 8 26.13 + * c-basic-offset: 8 26.14 + * tab-width: 8 26.15 + * End: 26.16 + */
27.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c Thu Sep 22 16:05:44 2005 +0100 27.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c Thu Sep 22 16:12:14 2005 +0100 27.3 @@ -566,7 +566,7 @@ vtpm_op_read(struct file *file, 27.4 * the more time we give the TPM to process the request. 27.5 */ 27.6 mod_timer(&pak->processing_timer, 27.7 - jiffies + (num_frontends * 10 * HZ)); 27.8 + jiffies + (num_frontends * 60 * HZ)); 27.9 dataex.copied_so_far = 0; 27.10 } 27.11 } 27.12 @@ -850,7 +850,7 @@ static int vtpm_queue_packet(struct pack 27.13 write_lock_irqsave(&dataex.pak_lock, flags); 27.14 list_add_tail(&pak->next, &dataex.pending_pak); 27.15 /* give the TPM some time to pick up the request */ 27.16 - mod_timer(&pak->processing_timer, jiffies + (10 * HZ)); 27.17 + mod_timer(&pak->processing_timer, jiffies + (30 * HZ)); 27.18 write_unlock_irqrestore(&dataex.pak_lock, 27.19 flags); 27.20 27.21 @@ -1075,3 +1075,13 @@ tpmback_init(void) 27.22 } 27.23 27.24 __initcall(tpmback_init); 27.25 + 27.26 +/* 27.27 + * Local variables: 27.28 + * c-file-style: "linux" 27.29 + * indent-tabs-mode: t 27.30 + * c-indent-level: 8 27.31 + * c-basic-offset: 8 27.32 + * tab-width: 8 27.33 + * End: 27.34 + */
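
The only behavioural change in the tpmback.c hunk is a pair of longer vTPM timeouts; the file-trailer addition is the same Emacs style block added throughout this changeset. As a worked reading of the arithmetic (HZ is timer ticks per second): the queueing timeout in vtpm_queue_packet moves from jiffies + (10 * HZ), i.e. 10 seconds, to jiffies + (30 * HZ), i.e. 30 seconds, and the timeout re-armed in vtpm_op_read scales per connected frontend, so with two frontends it moves from jiffies + (2 * 10 * HZ) = 20 seconds to jiffies + (2 * 60 * HZ) = 120 seconds.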
28.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c Thu Sep 22 16:05:44 2005 +0100 28.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c Thu Sep 22 16:12:14 2005 +0100 28.3 @@ -268,3 +268,13 @@ void tpmif_xenbus_init(void) 28.4 { 28.5 xenbus_register_backend(&tpmback); 28.6 } 28.7 + 28.8 +/* 28.9 + * Local variables: 28.10 + * c-file-style: "linux" 28.11 + * indent-tabs-mode: t 28.12 + * c-indent-level: 8 28.13 + * c-basic-offset: 8 28.14 + * tab-width: 8 28.15 + * End: 28.16 + */
29.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c Thu Sep 22 16:05:44 2005 +0100 29.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c Thu Sep 22 16:12:14 2005 +0100 29.3 @@ -741,3 +741,13 @@ tpmif_init(void) 29.4 } 29.5 29.6 __initcall(tpmif_init); 29.7 + 29.8 +/* 29.9 + * Local variables: 29.10 + * c-file-style: "linux" 29.11 + * indent-tabs-mode: t 29.12 + * c-indent-level: 8 29.13 + * c-basic-offset: 8 29.14 + * tab-width: 8 29.15 + * End: 29.16 + */
30.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h Thu Sep 22 16:05:44 2005 +0100 30.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h Thu Sep 22 16:12:14 2005 +0100 30.3 @@ -38,3 +38,13 @@ struct tx_buffer 30.4 }; 30.5 30.6 #endif 30.7 + 30.8 +/* 30.9 + * Local variables: 30.10 + * c-file-style: "linux" 30.11 + * indent-tabs-mode: t 30.12 + * c-indent-level: 8 30.13 + * c-basic-offset: 8 30.14 + * tab-width: 8 30.15 + * End: 30.16 + */
31.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/common.h Thu Sep 22 16:05:44 2005 +0100 31.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 31.3 @@ -1,84 +0,0 @@ 31.4 - 31.5 -#ifndef __USBIF__BACKEND__COMMON_H__ 31.6 -#define __USBIF__BACKEND__COMMON_H__ 31.7 - 31.8 -#include <linux/config.h> 31.9 -#include <linux/version.h> 31.10 -#include <linux/module.h> 31.11 -#include <linux/rbtree.h> 31.12 -#include <linux/interrupt.h> 31.13 -#include <linux/slab.h> 31.14 -#include <linux/blkdev.h> 31.15 -#include <asm/io.h> 31.16 -#include <asm/setup.h> 31.17 -#include <asm/pgalloc.h> 31.18 -#include <asm/hypervisor.h> 31.19 -#include <asm-xen/driver_util.h> 31.20 -#include <asm-xen/xen-public/io/usbif.h> 31.21 - 31.22 -#if 0 31.23 -#define ASSERT(_p) \ 31.24 - if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \ 31.25 - __LINE__, __FILE__); *(int*)0=0; } 31.26 -#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \ 31.27 - __FILE__ , __LINE__ , ## _a ) 31.28 -#else 31.29 -#define ASSERT(_p) ((void)0) 31.30 -#define DPRINTK(_f, _a...) ((void)0) 31.31 -#endif 31.32 - 31.33 -typedef struct usbif_priv_st usbif_priv_t; 31.34 - 31.35 -struct usbif_priv_st { 31.36 - /* Unique identifier for this interface. */ 31.37 - domid_t domid; 31.38 - unsigned int handle; 31.39 - /* Physical parameters of the comms window. */ 31.40 - unsigned long shmem_frame; 31.41 - unsigned int evtchn; 31.42 - /* Comms Information */ 31.43 - usbif_back_ring_t usb_ring; 31.44 - struct vm_struct *usb_ring_area; 31.45 - /* Private fields. */ 31.46 - enum { DISCONNECTED, DISCONNECTING, CONNECTED } status; 31.47 - /* 31.48 - * DISCONNECT response is deferred until pending requests are ack'ed. 31.49 - * We therefore need to store the id from the original request. 31.50 - */ 31.51 - u8 disconnect_rspid; 31.52 - usbif_priv_t *hash_next; 31.53 - struct list_head usbif_list; 31.54 - spinlock_t usb_ring_lock; 31.55 - atomic_t refcnt; 31.56 - 31.57 - struct work_struct work; 31.58 -}; 31.59 - 31.60 -void usbif_create(usbif_be_create_t *create); 31.61 -void usbif_destroy(usbif_be_destroy_t *destroy); 31.62 -void usbif_connect(usbif_be_connect_t *connect); 31.63 -int usbif_disconnect(usbif_be_disconnect_t *disconnect, u8 rsp_id); 31.64 -void usbif_disconnect_complete(usbif_priv_t *up); 31.65 - 31.66 -void usbif_release_port(usbif_be_release_port_t *msg); 31.67 -int usbif_claim_port(usbif_be_claim_port_t *msg); 31.68 -void usbif_release_ports(usbif_priv_t *up); 31.69 - 31.70 -usbif_priv_t *usbif_find(domid_t domid); 31.71 -#define usbif_get(_b) (atomic_inc(&(_b)->refcnt)) 31.72 -#define usbif_put(_b) \ 31.73 - do { \ 31.74 - if ( atomic_dec_and_test(&(_b)->refcnt) ) \ 31.75 - usbif_disconnect_complete(_b); \ 31.76 - } while (0) 31.77 - 31.78 - 31.79 -void usbif_interface_init(void); 31.80 -void usbif_ctrlif_init(void); 31.81 - 31.82 -void usbif_deschedule(usbif_priv_t *up); 31.83 -void remove_from_usbif_list(usbif_priv_t *up); 31.84 - 31.85 -irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs); 31.86 - 31.87 -#endif /* __USBIF__BACKEND__COMMON_H__ */
32.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/control.c Thu Sep 22 16:05:44 2005 +0100 32.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 32.3 @@ -1,61 +0,0 @@ 32.4 -/****************************************************************************** 32.5 - * arch/xen/drivers/usbif/backend/control.c 32.6 - * 32.7 - * Routines for interfacing with the control plane. 32.8 - * 32.9 - * Copyright (c) 2004, Keir Fraser 32.10 - */ 32.11 - 32.12 -#include "common.h" 32.13 - 32.14 -static void usbif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id) 32.15 -{ 32.16 - DPRINTK("Received usbif backend message, subtype=%d\n", msg->subtype); 32.17 - 32.18 - switch ( msg->subtype ) 32.19 - { 32.20 - case CMSG_USBIF_BE_CREATE: 32.21 - usbif_create((usbif_be_create_t *)&msg->msg[0]); 32.22 - break; 32.23 - case CMSG_USBIF_BE_DESTROY: 32.24 - usbif_destroy((usbif_be_destroy_t *)&msg->msg[0]); 32.25 - break; 32.26 - case CMSG_USBIF_BE_CONNECT: 32.27 - usbif_connect((usbif_be_connect_t *)&msg->msg[0]); 32.28 - break; 32.29 - case CMSG_USBIF_BE_DISCONNECT: 32.30 - if ( !usbif_disconnect((usbif_be_disconnect_t *)&msg->msg[0],msg->id) ) 32.31 - return; /* Sending the response is deferred until later. */ 32.32 - break; 32.33 - case CMSG_USBIF_BE_CLAIM_PORT: 32.34 - usbif_claim_port((usbif_be_claim_port_t *)&msg->msg[0]); 32.35 - break; 32.36 - case CMSG_USBIF_BE_RELEASE_PORT: 32.37 - usbif_release_port((usbif_be_release_port_t *)&msg->msg[0]); 32.38 - break; 32.39 - default: 32.40 - DPRINTK("Parse error while reading message subtype %d, len %d\n", 32.41 - msg->subtype, msg->length); 32.42 - msg->length = 0; 32.43 - break; 32.44 - } 32.45 - 32.46 - ctrl_if_send_response(msg); 32.47 -} 32.48 - 32.49 -void usbif_ctrlif_init(void) 32.50 -{ 32.51 - ctrl_msg_t cmsg; 32.52 - usbif_be_driver_status_changed_t st; 32.53 - 32.54 - (void)ctrl_if_register_receiver(CMSG_USBIF_BE, usbif_ctrlif_rx, 32.55 - CALLBACK_IN_BLOCKING_CONTEXT); 32.56 - 32.57 - /* Send a driver-UP notification to the domain controller. */ 32.58 - cmsg.type = CMSG_USBIF_BE; 32.59 - cmsg.subtype = CMSG_USBIF_BE_DRIVER_STATUS_CHANGED; 32.60 - cmsg.length = sizeof(usbif_be_driver_status_changed_t); 32.61 - st.status = USBIF_DRIVER_STATUS_UP; 32.62 - memcpy(cmsg.msg, &st, sizeof(st)); 32.63 - ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE); 32.64 -}
33.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/interface.c Thu Sep 22 16:05:44 2005 +0100 33.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 33.3 @@ -1,241 +0,0 @@ 33.4 -/****************************************************************************** 33.5 - * arch/xen/drivers/usbif/backend/interface.c 33.6 - * 33.7 - * USB device interface management. 33.8 - * 33.9 - * by Mark Williamson, Copyright (c) 2004 33.10 - */ 33.11 - 33.12 -#include "common.h" 33.13 - 33.14 -#define USBIF_HASHSZ 1024 33.15 -#define USBIF_HASH(_d) (((int)(_d))&(USBIF_HASHSZ-1)) 33.16 - 33.17 -static kmem_cache_t *usbif_priv_cachep; 33.18 -static usbif_priv_t *usbif_priv_hash[USBIF_HASHSZ]; 33.19 - 33.20 -usbif_priv_t *usbif_find(domid_t domid) 33.21 -{ 33.22 - usbif_priv_t *up = usbif_priv_hash[USBIF_HASH(domid)]; 33.23 - while ( (up != NULL ) && ( up->domid != domid ) ) 33.24 - up = up->hash_next; 33.25 - return up; 33.26 -} 33.27 - 33.28 -static void __usbif_disconnect_complete(void *arg) 33.29 -{ 33.30 - usbif_priv_t *usbif = (usbif_priv_t *)arg; 33.31 - ctrl_msg_t cmsg; 33.32 - usbif_be_disconnect_t disc; 33.33 - 33.34 - /* 33.35 - * These can't be done in usbif_disconnect() because at that point there 33.36 - * may be outstanding requests at the device whose asynchronous responses 33.37 - * must still be notified to the remote driver. 33.38 - */ 33.39 - free_vm_area(usbif->usb_ring_area); 33.40 - 33.41 - /* Construct the deferred response message. */ 33.42 - cmsg.type = CMSG_USBIF_BE; 33.43 - cmsg.subtype = CMSG_USBIF_BE_DISCONNECT; 33.44 - cmsg.id = usbif->disconnect_rspid; 33.45 - cmsg.length = sizeof(usbif_be_disconnect_t); 33.46 - disc.domid = usbif->domid; 33.47 - disc.status = USBIF_BE_STATUS_OKAY; 33.48 - memcpy(cmsg.msg, &disc, sizeof(disc)); 33.49 - 33.50 - /* 33.51 - * Make sure message is constructed /before/ status change, because 33.52 - * after the status change the 'usbif' structure could be deallocated at 33.53 - * any time. Also make sure we send the response /after/ status change, 33.54 - * as otherwise a subsequent CONNECT request could spuriously fail if 33.55 - * another CPU doesn't see the status change yet. 33.56 - */ 33.57 - mb(); 33.58 - if ( usbif->status != DISCONNECTING ) 33.59 - BUG(); 33.60 - usbif->status = DISCONNECTED; 33.61 - mb(); 33.62 - 33.63 - /* Send the successful response. 
*/ 33.64 - ctrl_if_send_response(&cmsg); 33.65 -} 33.66 - 33.67 -void usbif_disconnect_complete(usbif_priv_t *up) 33.68 -{ 33.69 - INIT_WORK(&up->work, __usbif_disconnect_complete, (void *)up); 33.70 - schedule_work(&up->work); 33.71 -} 33.72 - 33.73 -void usbif_create(usbif_be_create_t *create) 33.74 -{ 33.75 - domid_t domid = create->domid; 33.76 - usbif_priv_t **pup, *up; 33.77 - 33.78 - if ( (up = kmem_cache_alloc(usbif_priv_cachep, GFP_KERNEL)) == NULL ) 33.79 - { 33.80 - DPRINTK("Could not create usbif: out of memory\n"); 33.81 - create->status = USBIF_BE_STATUS_OUT_OF_MEMORY; 33.82 - return; 33.83 - } 33.84 - 33.85 - memset(up, 0, sizeof(*up)); 33.86 - up->domid = domid; 33.87 - up->status = DISCONNECTED; 33.88 - spin_lock_init(&up->usb_ring_lock); 33.89 - atomic_set(&up->refcnt, 0); 33.90 - 33.91 - pup = &usbif_priv_hash[USBIF_HASH(domid)]; 33.92 - while ( *pup != NULL ) 33.93 - { 33.94 - if ( (*pup)->domid == domid ) 33.95 - { 33.96 - create->status = USBIF_BE_STATUS_INTERFACE_EXISTS; 33.97 - kmem_cache_free(usbif_priv_cachep, up); 33.98 - return; 33.99 - } 33.100 - pup = &(*pup)->hash_next; 33.101 - } 33.102 - 33.103 - up->hash_next = *pup; 33.104 - *pup = up; 33.105 - 33.106 - create->status = USBIF_BE_STATUS_OKAY; 33.107 -} 33.108 - 33.109 -void usbif_destroy(usbif_be_destroy_t *destroy) 33.110 -{ 33.111 - domid_t domid = destroy->domid; 33.112 - usbif_priv_t **pup, *up; 33.113 - 33.114 - pup = &usbif_priv_hash[USBIF_HASH(domid)]; 33.115 - while ( (up = *pup) != NULL ) 33.116 - { 33.117 - if ( up->domid == domid ) 33.118 - { 33.119 - if ( up->status != DISCONNECTED ) 33.120 - goto still_connected; 33.121 - goto destroy; 33.122 - } 33.123 - pup = &up->hash_next; 33.124 - } 33.125 - 33.126 - destroy->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND; 33.127 - return; 33.128 - 33.129 - still_connected: 33.130 - destroy->status = USBIF_BE_STATUS_INTERFACE_CONNECTED; 33.131 - return; 33.132 - 33.133 - destroy: 33.134 - *pup = up->hash_next; 33.135 - usbif_release_ports(up); 33.136 - kmem_cache_free(usbif_priv_cachep, up); 33.137 - destroy->status = USBIF_BE_STATUS_OKAY; 33.138 -} 33.139 - 33.140 -void usbif_connect(usbif_be_connect_t *connect) 33.141 -{ 33.142 - domid_t domid = connect->domid; 33.143 - unsigned int evtchn = connect->evtchn; 33.144 - unsigned long shmem_frame = connect->shmem_frame; 33.145 - pgprot_t prot; 33.146 - int error; 33.147 - usbif_priv_t *up; 33.148 - usbif_sring_t *sring; 33.149 - 33.150 - up = usbif_find(domid); 33.151 - if ( unlikely(up == NULL) ) 33.152 - { 33.153 - DPRINTK("usbif_connect attempted for non-existent usbif (%u)\n", 33.154 - connect->domid); 33.155 - connect->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND; 33.156 - return; 33.157 - } 33.158 - 33.159 - if ( (up->usb_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL ) 33.160 - { 33.161 - connect->status = USBIF_BE_STATUS_OUT_OF_MEMORY; 33.162 - return; 33.163 - } 33.164 - 33.165 - prot = __pgprot(_KERNPG_TABLE); 33.166 - error = direct_remap_pfn_range(&init_mm, AREALLOC_AREADDR(area->addr), 33.167 - shmem_frame, PAGE_SIZE, 33.168 - prot, domid); 33.169 - if ( error != 0 ) 33.170 - { 33.171 - if ( error == -ENOMEM ) 33.172 - connect->status = USBIF_BE_STATUS_OUT_OF_MEMORY; 33.173 - else if ( error == -EFAULT ) 33.174 - connect->status = USBIF_BE_STATUS_MAPPING_ERROR; 33.175 - else 33.176 - connect->status = USBIF_BE_STATUS_ERROR; 33.177 - free_vm_area(up->usb_ring_area); 33.178 - return; 33.179 - } 33.180 - 33.181 - if ( up->status != DISCONNECTED ) 33.182 - { 33.183 - connect->status = 
USBIF_BE_STATUS_INTERFACE_CONNECTED; 33.184 - free_vm_area(up->usb_ring_area); 33.185 - return; 33.186 - } 33.187 - 33.188 - sring = (usbif_sring_t *)area->addr; 33.189 - SHARED_RING_INIT(sring); 33.190 - BACK_RING_INIT(&up->usb_ring, sring, PAGE_SIZE); 33.191 - 33.192 - up->evtchn = evtchn; 33.193 - up->shmem_frame = shmem_frame; 33.194 - up->status = CONNECTED; 33.195 - usbif_get(up); 33.196 - 33.197 - (void)bind_evtchn_to_irqhandler( 33.198 - evtchn, usbif_be_int, 0, "usbif-backend", up); 33.199 - 33.200 - connect->status = USBIF_BE_STATUS_OKAY; 33.201 -} 33.202 - 33.203 -/* Remove URBs for this interface before destroying it. */ 33.204 -void usbif_deschedule(usbif_priv_t *up) 33.205 -{ 33.206 - remove_from_usbif_list(up); 33.207 -} 33.208 - 33.209 -int usbif_disconnect(usbif_be_disconnect_t *disconnect, u8 rsp_id) 33.210 -{ 33.211 - domid_t domid = disconnect->domid; 33.212 - usbif_priv_t *up; 33.213 - 33.214 - up = usbif_find(domid); 33.215 - if ( unlikely(up == NULL) ) 33.216 - { 33.217 - DPRINTK("usbif_disconnect attempted for non-existent usbif" 33.218 - " (%u)\n", disconnect->domid); 33.219 - disconnect->status = USBIF_BE_STATUS_INTERFACE_NOT_FOUND; 33.220 - return 1; /* Caller will send response error message. */ 33.221 - } 33.222 - 33.223 - if ( up->status == CONNECTED ) 33.224 - { 33.225 - up->status = DISCONNECTING; 33.226 - up->disconnect_rspid = rsp_id; 33.227 - wmb(); /* Let other CPUs see the status change. */ 33.228 - unbind_evtchn_from_irqhandler(up->evtchn, up); 33.229 - usbif_deschedule(up); 33.230 - usbif_put(up); 33.231 - return 0; /* Caller should not send response message. */ 33.232 - } 33.233 - 33.234 - disconnect->status = USBIF_BE_STATUS_OKAY; 33.235 - return 1; 33.236 -} 33.237 - 33.238 -void __init usbif_interface_init(void) 33.239 -{ 33.240 - usbif_priv_cachep = kmem_cache_create("usbif_priv_cache", 33.241 - sizeof(usbif_priv_t), 33.242 - 0, 0, NULL, NULL); 33.243 - memset(usbif_priv_hash, 0, sizeof(usbif_priv_hash)); 33.244 -}
34.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c Thu Sep 22 16:05:44 2005 +0100 34.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 34.3 @@ -1,1068 +0,0 @@ 34.4 -/****************************************************************************** 34.5 - * arch/xen/drivers/usbif/backend/main.c 34.6 - * 34.7 - * Backend for the Xen virtual USB driver - provides an abstraction of a 34.8 - * USB host controller to the corresponding frontend driver. 34.9 - * 34.10 - * by Mark Williamson 34.11 - * Copyright (c) 2004 Intel Research Cambridge 34.12 - * Copyright (c) 2004, 2005 Mark Williamson 34.13 - * 34.14 - * Based on arch/xen/drivers/blkif/backend/main.c 34.15 - * Copyright (c) 2003-2004, Keir Fraser & Steve Hand 34.16 - */ 34.17 - 34.18 -#include "common.h" 34.19 - 34.20 - 34.21 -#include <linux/list.h> 34.22 -#include <linux/usb.h> 34.23 -#include <linux/spinlock.h> 34.24 -#include <linux/module.h> 34.25 -#include <linux/tqueue.h> 34.26 - 34.27 -/* 34.28 - * This is rather arbitrary. 34.29 - */ 34.30 -#define MAX_PENDING_REQS 4 34.31 -#define BATCH_PER_DOMAIN 1 34.32 - 34.33 -static unsigned long mmap_vstart; 34.34 - 34.35 -/* Needs to be sufficiently large that we can map the (large) buffers 34.36 - * the USB mass storage driver wants. */ 34.37 -#define MMAP_PAGES_PER_REQUEST \ 34.38 - (128) 34.39 -#define MMAP_PAGES \ 34.40 - (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST) 34.41 - 34.42 -#define MMAP_VADDR(_req,_seg) \ 34.43 - (mmap_vstart + \ 34.44 - ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \ 34.45 - ((_seg) * PAGE_SIZE)) 34.46 - 34.47 - 34.48 -static spinlock_t owned_ports_lock; 34.49 -LIST_HEAD(owned_ports); 34.50 - 34.51 -/* A list of these structures is used to track ownership of physical USB 34.52 - * ports. */ 34.53 -typedef struct 34.54 -{ 34.55 - usbif_priv_t *usbif_priv; 34.56 - char path[16]; 34.57 - int guest_port; 34.58 - int enabled; 34.59 - struct list_head list; 34.60 - unsigned long guest_address; /* The USB device address that has been 34.61 - * assigned by the guest. */ 34.62 - int dev_present; /* Is there a device present? */ 34.63 - struct usb_device * dev; 34.64 - unsigned long ifaces; /* What interfaces are present on this device? */ 34.65 -} owned_port_t; 34.66 - 34.67 - 34.68 -/* 34.69 - * Each outstanding request that we've passed to the lower device layers has a 34.70 - * 'pending_req' allocated to it. The request is complete, the specified 34.71 - * domain has a response queued for it, with the saved 'id' passed back. 34.72 - */ 34.73 -typedef struct { 34.74 - usbif_priv_t *usbif_priv; 34.75 - unsigned long id; 34.76 - int nr_pages; 34.77 - unsigned short operation; 34.78 - int status; 34.79 -} pending_req_t; 34.80 - 34.81 -/* 34.82 - * We can't allocate pending_req's in order, since they may complete out of 34.83 - * order. We therefore maintain an allocation ring. This ring also indicates 34.84 - * when enough work has been passed down -- at that point the allocation ring 34.85 - * will be empty. 34.86 - */ 34.87 -static pending_req_t pending_reqs[MAX_PENDING_REQS]; 34.88 -static unsigned char pending_ring[MAX_PENDING_REQS]; 34.89 -static spinlock_t pend_prod_lock; 34.90 - 34.91 -/* NB. We use a different index type to differentiate from shared usb rings. 
*/ 34.92 -typedef unsigned int PEND_RING_IDX; 34.93 -#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1)) 34.94 -static PEND_RING_IDX pending_prod, pending_cons; 34.95 -#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons) 34.96 - 34.97 -static int do_usb_io_op(usbif_priv_t *usbif, int max_to_do); 34.98 -static void make_response(usbif_priv_t *usbif, unsigned long id, 34.99 - unsigned short op, int st, int inband, 34.100 - unsigned long actual_length); 34.101 -static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long port); 34.102 -static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req); 34.103 -static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid); 34.104 -static owned_port_t *usbif_find_port(char *); 34.105 - 34.106 -/****************************************************************** 34.107 - * PRIVATE DEBUG FUNCTIONS 34.108 - */ 34.109 - 34.110 -#undef DEBUG 34.111 -#ifdef DEBUG 34.112 - 34.113 -static void dump_port(owned_port_t *p) 34.114 -{ 34.115 - printk(KERN_DEBUG "owned_port_t @ %p\n" 34.116 - " usbif_priv @ %p\n" 34.117 - " path: %s\n" 34.118 - " guest_port: %d\n" 34.119 - " guest_address: %ld\n" 34.120 - " dev_present: %d\n" 34.121 - " dev @ %p\n" 34.122 - " ifaces: 0x%lx\n", 34.123 - p, p->usbif_priv, p->path, p->guest_port, p->guest_address, 34.124 - p->dev_present, p->dev, p->ifaces); 34.125 -} 34.126 - 34.127 - 34.128 -static void dump_request(usbif_request_t *req) 34.129 -{ 34.130 - printk(KERN_DEBUG "id = 0x%lx\n" 34.131 - "devnum %d\n" 34.132 - "endpoint 0x%x\n" 34.133 - "direction %d\n" 34.134 - "speed %d\n" 34.135 - "pipe_type 0x%x\n" 34.136 - "transfer_buffer 0x%lx\n" 34.137 - "length 0x%lx\n" 34.138 - "transfer_flags 0x%lx\n" 34.139 - "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n" 34.140 - "iso_schedule = 0x%lx\n" 34.141 - "num_iso %ld\n", 34.142 - req->id, req->devnum, req->endpoint, req->direction, req->speed, 34.143 - req->pipe_type, req->transfer_buffer, req->length, 34.144 - req->transfer_flags, req->setup[0], req->setup[1], req->setup[2], 34.145 - req->setup[3], req->setup[4], req->setup[5], req->setup[6], 34.146 - req->setup[7], req->iso_schedule, req->num_iso); 34.147 -} 34.148 - 34.149 -static void dump_urb(struct urb *urb) 34.150 -{ 34.151 - printk(KERN_DEBUG "dumping urb @ %p\n", urb); 34.152 - 34.153 -#define DUMP_URB_FIELD(name, format) \ 34.154 - printk(KERN_DEBUG " " # name " " format "\n", urb-> name) 34.155 - 34.156 - DUMP_URB_FIELD(pipe, "0x%x"); 34.157 - DUMP_URB_FIELD(status, "%d"); 34.158 - DUMP_URB_FIELD(transfer_flags, "0x%x"); 34.159 - DUMP_URB_FIELD(transfer_buffer, "%p"); 34.160 - DUMP_URB_FIELD(transfer_buffer_length, "%d"); 34.161 - DUMP_URB_FIELD(actual_length, "%d"); 34.162 -} 34.163 - 34.164 -static void dump_response(usbif_response_t *resp) 34.165 -{ 34.166 - printk(KERN_DEBUG "usbback: Sending response:\n" 34.167 - " id = 0x%x\n" 34.168 - " op = %d\n" 34.169 - " status = %d\n" 34.170 - " data = %d\n" 34.171 - " length = %d\n", 34.172 - resp->id, resp->op, resp->status, resp->data, resp->length); 34.173 -} 34.174 - 34.175 -#else /* DEBUG */ 34.176 - 34.177 -#define dump_port(blah) ((void)0) 34.178 -#define dump_request(blah) ((void)0) 34.179 -#define dump_urb(blah) ((void)0) 34.180 -#define dump_response(blah) ((void)0) 34.181 - 34.182 -#endif /* DEBUG */ 34.183 - 34.184 -/****************************************************************** 34.185 - * MEMORY MANAGEMENT 34.186 - */ 34.187 - 34.188 -static void fast_flush_area(int idx, int nr_pages) 
34.189 -{ 34.190 - multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST]; 34.191 - int i; 34.192 - 34.193 - for ( i = 0; i < nr_pages; i++ ) 34.194 - { 34.195 - MULTI_update_va_mapping(mcl+i, MMAP_VADDR(idx, i), 34.196 - __pte(0), 0); 34.197 - } 34.198 - 34.199 - mcl[nr_pages-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 34.200 - if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) ) 34.201 - BUG(); 34.202 -} 34.203 - 34.204 - 34.205 -/****************************************************************** 34.206 - * USB INTERFACE SCHEDULER LIST MAINTENANCE 34.207 - */ 34.208 - 34.209 -static struct list_head usbio_schedule_list; 34.210 -static spinlock_t usbio_schedule_list_lock; 34.211 - 34.212 -static int __on_usbif_list(usbif_priv_t *up) 34.213 -{ 34.214 - return up->usbif_list.next != NULL; 34.215 -} 34.216 - 34.217 -void remove_from_usbif_list(usbif_priv_t *up) 34.218 -{ 34.219 - unsigned long flags; 34.220 - if ( !__on_usbif_list(up) ) return; 34.221 - spin_lock_irqsave(&usbio_schedule_list_lock, flags); 34.222 - if ( __on_usbif_list(up) ) 34.223 - { 34.224 - list_del(&up->usbif_list); 34.225 - up->usbif_list.next = NULL; 34.226 - usbif_put(up); 34.227 - } 34.228 - spin_unlock_irqrestore(&usbio_schedule_list_lock, flags); 34.229 -} 34.230 - 34.231 -static void add_to_usbif_list_tail(usbif_priv_t *up) 34.232 -{ 34.233 - unsigned long flags; 34.234 - if ( __on_usbif_list(up) ) return; 34.235 - spin_lock_irqsave(&usbio_schedule_list_lock, flags); 34.236 - if ( !__on_usbif_list(up) && (up->status == CONNECTED) ) 34.237 - { 34.238 - list_add_tail(&up->usbif_list, &usbio_schedule_list); 34.239 - usbif_get(up); 34.240 - } 34.241 - spin_unlock_irqrestore(&usbio_schedule_list_lock, flags); 34.242 -} 34.243 - 34.244 -void free_pending(int pending_idx) 34.245 -{ 34.246 - unsigned long flags; 34.247 - 34.248 - /* Free the pending request. */ 34.249 - spin_lock_irqsave(&pend_prod_lock, flags); 34.250 - pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; 34.251 - spin_unlock_irqrestore(&pend_prod_lock, flags); 34.252 -} 34.253 - 34.254 -/****************************************************************** 34.255 - * COMPLETION CALLBACK -- Called as urb->complete() 34.256 - */ 34.257 - 34.258 -static void maybe_trigger_usbio_schedule(void); 34.259 - 34.260 -static void __end_usb_io_op(struct urb *purb) 34.261 -{ 34.262 - pending_req_t *pending_req; 34.263 - int pending_idx; 34.264 - 34.265 - pending_req = purb->context; 34.266 - 34.267 - pending_idx = pending_req - pending_reqs; 34.268 - 34.269 - ASSERT(purb->actual_length <= purb->transfer_buffer_length); 34.270 - ASSERT(purb->actual_length <= pending_req->nr_pages * PAGE_SIZE); 34.271 - 34.272 - /* An error fails the entire request. */ 34.273 - if ( purb->status ) 34.274 - { 34.275 - printk(KERN_WARNING "URB @ %p failed. Status %d\n", purb, purb->status); 34.276 - } 34.277 - 34.278 - if ( usb_pipetype(purb->pipe) == 0 ) 34.279 - { 34.280 - int i; 34.281 - usbif_iso_t *sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, pending_req->nr_pages - 1); 34.282 - 34.283 - /* If we're dealing with an iso pipe, we need to copy back the schedule. 
*/ 34.284 - for ( i = 0; i < purb->number_of_packets; i++ ) 34.285 - { 34.286 - sched[i].length = purb->iso_frame_desc[i].actual_length; 34.287 - ASSERT(sched[i].buffer_offset == 34.288 - purb->iso_frame_desc[i].offset); 34.289 - sched[i].status = purb->iso_frame_desc[i].status; 34.290 - } 34.291 - } 34.292 - 34.293 - fast_flush_area(pending_req - pending_reqs, pending_req->nr_pages); 34.294 - 34.295 - kfree(purb->setup_packet); 34.296 - 34.297 - make_response(pending_req->usbif_priv, pending_req->id, 34.298 - pending_req->operation, pending_req->status, 0, purb->actual_length); 34.299 - usbif_put(pending_req->usbif_priv); 34.300 - 34.301 - usb_free_urb(purb); 34.302 - 34.303 - free_pending(pending_idx); 34.304 - 34.305 - rmb(); 34.306 - 34.307 - /* Check for anything still waiting in the rings, having freed a request... */ 34.308 - maybe_trigger_usbio_schedule(); 34.309 -} 34.310 - 34.311 -/****************************************************************** 34.312 - * SCHEDULER FUNCTIONS 34.313 - */ 34.314 - 34.315 -static DECLARE_WAIT_QUEUE_HEAD(usbio_schedule_wait); 34.316 - 34.317 -static int usbio_schedule(void *arg) 34.318 -{ 34.319 - DECLARE_WAITQUEUE(wq, current); 34.320 - 34.321 - usbif_priv_t *up; 34.322 - struct list_head *ent; 34.323 - 34.324 - daemonize(); 34.325 - 34.326 - for ( ; ; ) 34.327 - { 34.328 - /* Wait for work to do. */ 34.329 - add_wait_queue(&usbio_schedule_wait, &wq); 34.330 - set_current_state(TASK_INTERRUPTIBLE); 34.331 - if ( (NR_PENDING_REQS == MAX_PENDING_REQS) || 34.332 - list_empty(&usbio_schedule_list) ) 34.333 - schedule(); 34.334 - __set_current_state(TASK_RUNNING); 34.335 - remove_wait_queue(&usbio_schedule_wait, &wq); 34.336 - 34.337 - /* Queue up a batch of requests. */ 34.338 - while ( (NR_PENDING_REQS < MAX_PENDING_REQS) && 34.339 - !list_empty(&usbio_schedule_list) ) 34.340 - { 34.341 - ent = usbio_schedule_list.next; 34.342 - up = list_entry(ent, usbif_priv_t, usbif_list); 34.343 - usbif_get(up); 34.344 - remove_from_usbif_list(up); 34.345 - if ( do_usb_io_op(up, BATCH_PER_DOMAIN) ) 34.346 - add_to_usbif_list_tail(up); 34.347 - usbif_put(up); 34.348 - } 34.349 - } 34.350 -} 34.351 - 34.352 -static void maybe_trigger_usbio_schedule(void) 34.353 -{ 34.354 - /* 34.355 - * Needed so that two processes, who together make the following predicate 34.356 - * true, don't both read stale values and evaluate the predicate 34.357 - * incorrectly. Incredibly unlikely to stall the scheduler on x86, but... 34.358 - */ 34.359 - smp_mb(); 34.360 - 34.361 - if ( !list_empty(&usbio_schedule_list) ) 34.362 - wake_up(&usbio_schedule_wait); 34.363 -} 34.364 - 34.365 - 34.366 -/****************************************************************************** 34.367 - * NOTIFICATION FROM GUEST OS. 34.368 - */ 34.369 - 34.370 -irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs) 34.371 -{ 34.372 - usbif_priv_t *up = dev_id; 34.373 - 34.374 - smp_mb(); 34.375 - 34.376 - add_to_usbif_list_tail(up); 34.377 - 34.378 - /* Will in fact /always/ trigger an io schedule in this case. */ 34.379 - maybe_trigger_usbio_schedule(); 34.380 - 34.381 - return IRQ_HANDLED; 34.382 -} 34.383 - 34.384 - 34.385 - 34.386 -/****************************************************************** 34.387 - * DOWNWARD CALLS -- These interface with the usb-device layer proper. 
34.388 - */ 34.389 - 34.390 -static int do_usb_io_op(usbif_priv_t *up, int max_to_do) 34.391 -{ 34.392 - usbif_back_ring_t *usb_ring = &up->usb_ring; 34.393 - usbif_request_t *req; 34.394 - RING_IDX i, rp; 34.395 - int more_to_do = 0; 34.396 - 34.397 - rp = usb_ring->sring->req_prod; 34.398 - rmb(); /* Ensure we see queued requests up to 'rp'. */ 34.399 - 34.400 - /* Take items off the comms ring, taking care not to overflow. */ 34.401 - for ( i = usb_ring->req_cons; 34.402 - (i != rp) && !RING_REQUEST_CONS_OVERFLOW(usb_ring, i); 34.403 - i++ ) 34.404 - { 34.405 - if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) ) 34.406 - { 34.407 - more_to_do = 1; 34.408 - break; 34.409 - } 34.410 - 34.411 - req = RING_GET_REQUEST(usb_ring, i); 34.412 - 34.413 - switch ( req->operation ) 34.414 - { 34.415 - case USBIF_OP_PROBE: 34.416 - dispatch_usb_probe(up, req->id, req->port); 34.417 - break; 34.418 - 34.419 - case USBIF_OP_IO: 34.420 - /* Assemble an appropriate URB. */ 34.421 - dispatch_usb_io(up, req); 34.422 - break; 34.423 - 34.424 - case USBIF_OP_RESET: 34.425 - dispatch_usb_reset(up, req->port); 34.426 - break; 34.427 - 34.428 - default: 34.429 - DPRINTK("error: unknown USB io operation [%d]\n", 34.430 - req->operation); 34.431 - make_response(up, req->id, req->operation, -EINVAL, 0, 0); 34.432 - break; 34.433 - } 34.434 - } 34.435 - 34.436 - usb_ring->req_cons = i; 34.437 - 34.438 - return more_to_do; 34.439 -} 34.440 - 34.441 -static owned_port_t *find_guest_port(usbif_priv_t *up, int port) 34.442 -{ 34.443 - unsigned long flags; 34.444 - struct list_head *l; 34.445 - 34.446 - spin_lock_irqsave(&owned_ports_lock, flags); 34.447 - list_for_each(l, &owned_ports) 34.448 - { 34.449 - owned_port_t *p = list_entry(l, owned_port_t, list); 34.450 - if(p->usbif_priv == up && p->guest_port == port) 34.451 - { 34.452 - spin_unlock_irqrestore(&owned_ports_lock, flags); 34.453 - return p; 34.454 - } 34.455 - } 34.456 - spin_unlock_irqrestore(&owned_ports_lock, flags); 34.457 - 34.458 - return NULL; 34.459 -} 34.460 - 34.461 -static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid) 34.462 -{ 34.463 - owned_port_t *port = find_guest_port(up, portid); 34.464 - int ret = 0; 34.465 - 34.466 - 34.467 - /* Allowing the guest to actually reset the device causes more problems 34.468 - * than it's worth. We just fake it out in software but we will do a real 34.469 - * reset when the interface is destroyed. */ 34.470 - 34.471 - dump_port(port); 34.472 - 34.473 - port->guest_address = 0; 34.474 - /* If there's an attached device then the port is now enabled. */ 34.475 - if ( port->dev_present ) 34.476 - port->enabled = 1; 34.477 - else 34.478 - port->enabled = 0; 34.479 - 34.480 - make_response(up, 0, USBIF_OP_RESET, ret, 0, 0); 34.481 -} 34.482 - 34.483 -static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long portid) 34.484 -{ 34.485 - owned_port_t *port = find_guest_port(up, portid); 34.486 - int ret; 34.487 - 34.488 - if ( port != NULL ) 34.489 - ret = port->dev_present; 34.490 - else 34.491 - { 34.492 - ret = -EINVAL; 34.493 - printk(KERN_INFO "dispatch_usb_probe(): invalid port probe request " 34.494 - "(port %ld)\n", portid); 34.495 - } 34.496 - 34.497 - /* Probe result is sent back in-band. Probes don't have an associated id 34.498 - * right now... 
*/ 34.499 - make_response(up, id, USBIF_OP_PROBE, ret, portid, 0); 34.500 -} 34.501 - 34.502 -/** 34.503 - * check_iso_schedule - safety check the isochronous schedule for an URB 34.504 - * @purb : the URB in question 34.505 - */ 34.506 -static int check_iso_schedule(struct urb *purb) 34.507 -{ 34.508 - int i; 34.509 - unsigned long total_length = 0; 34.510 - 34.511 - for ( i = 0; i < purb->number_of_packets; i++ ) 34.512 - { 34.513 - struct usb_iso_packet_descriptor *desc = &purb->iso_frame_desc[i]; 34.514 - 34.515 - if ( desc->offset >= purb->transfer_buffer_length 34.516 - || ( desc->offset + desc->length) > purb->transfer_buffer_length ) 34.517 - return -EINVAL; 34.518 - 34.519 - total_length += desc->length; 34.520 - 34.521 - if ( total_length > purb->transfer_buffer_length ) 34.522 - return -EINVAL; 34.523 - } 34.524 - 34.525 - return 0; 34.526 -} 34.527 - 34.528 -owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req); 34.529 - 34.530 -static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req) 34.531 -{ 34.532 - unsigned long buffer_mach; 34.533 - int i = 0, offset = 0, 34.534 - pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)]; 34.535 - pending_req_t *pending_req; 34.536 - unsigned long remap_prot; 34.537 - multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST]; 34.538 - struct urb *purb = NULL; 34.539 - owned_port_t *port; 34.540 - unsigned char *setup; 34.541 - 34.542 - dump_request(req); 34.543 - 34.544 - if ( NR_PENDING_REQS == MAX_PENDING_REQS ) 34.545 - { 34.546 - printk(KERN_WARNING "usbback: Max requests already queued. " 34.547 - "Giving up!\n"); 34.548 - 34.549 - return; 34.550 - } 34.551 - 34.552 - port = find_port_for_request(up, req); 34.553 - 34.554 - if ( port == NULL ) 34.555 - { 34.556 - printk(KERN_WARNING "No such device! (%d)\n", req->devnum); 34.557 - dump_request(req); 34.558 - 34.559 - make_response(up, req->id, req->operation, -ENODEV, 0, 0); 34.560 - return; 34.561 - } 34.562 - else if ( !port->dev_present ) 34.563 - { 34.564 - /* In normal operation, we'll only get here if a device is unplugged 34.565 - * and the frontend hasn't noticed yet. */ 34.566 - make_response(up, req->id, req->operation, -ENODEV, 0, 0); 34.567 - return; 34.568 - } 34.569 - 34.570 - 34.571 - setup = kmalloc(8, GFP_KERNEL); 34.572 - 34.573 - if ( setup == NULL ) 34.574 - goto no_mem; 34.575 - 34.576 - /* Copy request out for safety. */ 34.577 - memcpy(setup, req->setup, 8); 34.578 - 34.579 - if( setup[0] == 0x0 && setup[1] == 0x5) 34.580 - { 34.581 - /* To virtualise the USB address space, we need to intercept 34.582 - * set_address messages and emulate. From the USB specification: 34.583 - * bmRequestType = 0x0; 34.584 - * Brequest = SET_ADDRESS (i.e. 0x5) 34.585 - * wValue = device address 34.586 - * wIndex = 0 34.587 - * wLength = 0 34.588 - * data = None 34.589 - */ 34.590 - /* Store into the guest transfer buffer using cpu_to_le16 */ 34.591 - port->guest_address = le16_to_cpu(*(u16 *)(setup + 2)); 34.592 - /* Make a successful response. That was easy! */ 34.593 - 34.594 - make_response(up, req->id, req->operation, 0, 0, 0); 34.595 - 34.596 - kfree(setup); 34.597 - return; 34.598 - } 34.599 - else if ( setup[0] == 0x0 && setup[1] == 0x9 ) 34.600 - { 34.601 - /* The host kernel needs to know what device configuration is in use 34.602 - * because various error checks get confused otherwise. We just do 34.603 - * configuration settings here, under controlled conditions. 
34.604 - */ 34.605 - 34.606 - /* Ignore configuration setting and hope that the host kernel 34.607 - did it right. */ 34.608 - /* usb_set_configuration(port->dev, setup[2]); */ 34.609 - 34.610 - make_response(up, req->id, req->operation, 0, 0, 0); 34.611 - 34.612 - kfree(setup); 34.613 - return; 34.614 - } 34.615 - else if ( setup[0] == 0x1 && setup[1] == 0xB ) 34.616 - { 34.617 - /* The host kernel needs to know what device interface is in use 34.618 - * because various error checks get confused otherwise. We just do 34.619 - * configuration settings here, under controlled conditions. 34.620 - */ 34.621 - usb_set_interface(port->dev, (setup[4] | setup[5] << 8), 34.622 - (setup[2] | setup[3] << 8) ); 34.623 - 34.624 - make_response(up, req->id, req->operation, 0, 0, 0); 34.625 - 34.626 - kfree(setup); 34.627 - return; 34.628 - } 34.629 - 34.630 - if ( ( req->transfer_buffer - (req->transfer_buffer & PAGE_MASK) 34.631 - + req->length ) 34.632 - > MMAP_PAGES_PER_REQUEST * PAGE_SIZE ) 34.633 - { 34.634 - printk(KERN_WARNING "usbback: request of %lu bytes too large\n", 34.635 - req->length); 34.636 - make_response(up, req->id, req->operation, -EINVAL, 0, 0); 34.637 - kfree(setup); 34.638 - return; 34.639 - } 34.640 - 34.641 - buffer_mach = req->transfer_buffer; 34.642 - 34.643 - if( buffer_mach == 0 ) 34.644 - goto no_remap; 34.645 - 34.646 - ASSERT((req->length >> PAGE_SHIFT) <= MMAP_PAGES_PER_REQUEST); 34.647 - ASSERT(buffer_mach); 34.648 - 34.649 - /* Always map writeable for now. */ 34.650 - remap_prot = _KERNPG_TABLE; 34.651 - 34.652 - for ( i = 0, offset = 0; offset < req->length; 34.653 - i++, offset += PAGE_SIZE ) 34.654 - { 34.655 - MULTI_update_va_mapping_otherdomain( 34.656 - mcl+i, MMAP_VADDR(pending_idx, i), 34.657 - pfn_pte_ma((buffer_mach + offset) >> PAGE_SHIFT, remap_prot), 34.658 - 0, up->domid); 34.659 - 34.660 - phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] = 34.661 - FOREIGN_FRAME((buffer_mach + offset) >> PAGE_SHIFT); 34.662 - 34.663 - ASSERT(virt_to_mfn(MMAP_VADDR(pending_idx, i)) 34.664 - == ((buffer_mach >> PAGE_SHIFT) + i)); 34.665 - } 34.666 - 34.667 - if ( req->pipe_type == 0 && req->num_iso > 0 ) /* Maybe schedule ISO... */ 34.668 - { 34.669 - /* Map in ISO schedule, if necessary. */ 34.670 - MULTI_update_va_mapping_otherdomain( 34.671 - mcl+i, MMAP_VADDR(pending_idx, i), 34.672 - pfn_pte_ma(req->iso_schedule >> PAGE_SHIFT, remap_prot), 34.673 - 0, up->domid); 34.674 - 34.675 - phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] = 34.676 - FOREIGN_FRAME(req->iso_schedule >> PAGE_SHIFT); 34.677 - 34.678 - i++; 34.679 - } 34.680 - 34.681 - if ( unlikely(HYPERVISOR_multicall(mcl, i) != 0) ) 34.682 - BUG(); 34.683 - 34.684 - { 34.685 - int j; 34.686 - for ( j = 0; j < i; j++ ) 34.687 - { 34.688 - if ( unlikely(mcl[j].result != 0) ) 34.689 - { 34.690 - printk(KERN_WARNING 34.691 - "invalid buffer %d -- could not remap it\n", j); 34.692 - fast_flush_area(pending_idx, i); 34.693 - goto bad_descriptor; 34.694 - } 34.695 - } 34.696 - } 34.697 - 34.698 - no_remap: 34.699 - 34.700 - ASSERT(i <= MMAP_PAGES_PER_REQUEST); 34.701 - ASSERT(i * PAGE_SIZE >= req->length); 34.702 - 34.703 - /* We have to do this because some things might complete out of order. 
*/ 34.704 - pending_req = &pending_reqs[pending_idx]; 34.705 - pending_req->usbif_priv= up; 34.706 - pending_req->id = req->id; 34.707 - pending_req->operation = req->operation; 34.708 - pending_req->nr_pages = i; 34.709 - 34.710 - pending_cons++; 34.711 - 34.712 - usbif_get(up); 34.713 - 34.714 - /* Fill out an actual request for the USB layer. */ 34.715 - purb = usb_alloc_urb(req->num_iso); 34.716 - 34.717 - if ( purb == NULL ) 34.718 - { 34.719 - usbif_put(up); 34.720 - free_pending(pending_idx); 34.721 - goto no_mem; 34.722 - } 34.723 - 34.724 - purb->dev = port->dev; 34.725 - purb->context = pending_req; 34.726 - purb->transfer_buffer = 34.727 - (void *)(MMAP_VADDR(pending_idx, 0) + (buffer_mach & ~PAGE_MASK)); 34.728 - if(buffer_mach == 0) 34.729 - purb->transfer_buffer = NULL; 34.730 - purb->complete = __end_usb_io_op; 34.731 - purb->transfer_buffer_length = req->length; 34.732 - purb->transfer_flags = req->transfer_flags; 34.733 - 34.734 - purb->pipe = 0; 34.735 - purb->pipe |= req->direction << 7; 34.736 - purb->pipe |= port->dev->devnum << 8; 34.737 - purb->pipe |= req->speed << 26; 34.738 - purb->pipe |= req->pipe_type << 30; 34.739 - purb->pipe |= req->endpoint << 15; 34.740 - 34.741 - purb->number_of_packets = req->num_iso; 34.742 - 34.743 - if ( purb->number_of_packets * sizeof(usbif_iso_t) > PAGE_SIZE ) 34.744 - goto urb_error; 34.745 - 34.746 - /* Make sure there's always some kind of timeout. */ 34.747 - purb->timeout = ( req->timeout > 0 ) ? (req->timeout * HZ) / 1000 34.748 - : 1000; 34.749 - 34.750 - purb->setup_packet = setup; 34.751 - 34.752 - if ( req->pipe_type == 0 ) /* ISO */ 34.753 - { 34.754 - int j; 34.755 - usbif_iso_t *iso_sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, i - 1); 34.756 - 34.757 - /* If we're dealing with an iso pipe, we need to copy in a schedule. */ 34.758 - for ( j = 0; j < purb->number_of_packets; j++ ) 34.759 - { 34.760 - purb->iso_frame_desc[j].length = iso_sched[j].length; 34.761 - purb->iso_frame_desc[j].offset = iso_sched[j].buffer_offset; 34.762 - iso_sched[j].status = 0; 34.763 - } 34.764 - } 34.765 - 34.766 - if ( check_iso_schedule(purb) != 0 ) 34.767 - goto urb_error; 34.768 - 34.769 - if ( usb_submit_urb(purb) != 0 ) 34.770 - goto urb_error; 34.771 - 34.772 - return; 34.773 - 34.774 - urb_error: 34.775 - dump_urb(purb); 34.776 - usbif_put(up); 34.777 - free_pending(pending_idx); 34.778 - 34.779 - bad_descriptor: 34.780 - kfree ( setup ); 34.781 - if ( purb != NULL ) 34.782 - usb_free_urb(purb); 34.783 - make_response(up, req->id, req->operation, -EINVAL, 0, 0); 34.784 - return; 34.785 - 34.786 - no_mem: 34.787 - if ( setup != NULL ) 34.788 - kfree(setup); 34.789 - make_response(up, req->id, req->operation, -ENOMEM, 0, 0); 34.790 - return; 34.791 -} 34.792 - 34.793 - 34.794 - 34.795 -/****************************************************************** 34.796 - * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING 34.797 - */ 34.798 - 34.799 - 34.800 -static void make_response(usbif_priv_t *up, unsigned long id, 34.801 - unsigned short op, int st, int inband, 34.802 - unsigned long length) 34.803 -{ 34.804 - usbif_response_t *resp; 34.805 - unsigned long flags; 34.806 - usbif_back_ring_t *usb_ring = &up->usb_ring; 34.807 - 34.808 - /* Place on the response ring for the relevant domain. 
*/ 34.809 - spin_lock_irqsave(&up->usb_ring_lock, flags); 34.810 - resp = RING_GET_RESPONSE(usb_ring, usb_ring->rsp_prod_pvt); 34.811 - resp->id = id; 34.812 - resp->operation = op; 34.813 - resp->status = st; 34.814 - resp->data = inband; 34.815 - resp->length = length; 34.816 - wmb(); /* Ensure other side can see the response fields. */ 34.817 - 34.818 - dump_response(resp); 34.819 - 34.820 - usb_ring->rsp_prod_pvt++; 34.821 - RING_PUSH_RESPONSES(usb_ring); 34.822 - spin_unlock_irqrestore(&up->usb_ring_lock, flags); 34.823 - 34.824 - /* Kick the relevant domain. */ 34.825 - notify_via_evtchn(up->evtchn); 34.826 -} 34.827 - 34.828 -/** 34.829 - * usbif_claim_port - claim devices on a port on behalf of guest 34.830 - * 34.831 - * Once completed, this will ensure that any device attached to that 34.832 - * port is claimed by this driver for use by the guest. 34.833 - */ 34.834 -int usbif_claim_port(usbif_be_claim_port_t *msg) 34.835 -{ 34.836 - owned_port_t *o_p; 34.837 - 34.838 - /* Sanity... */ 34.839 - if ( usbif_find_port(msg->path) != NULL ) 34.840 - { 34.841 - printk(KERN_WARNING "usbback: Attempted to claim USB port " 34.842 - "we already own!\n"); 34.843 - return -EINVAL; 34.844 - } 34.845 - 34.846 - /* No need for a slab cache - this should be infrequent. */ 34.847 - o_p = kmalloc(sizeof(owned_port_t), GFP_KERNEL); 34.848 - 34.849 - if ( o_p == NULL ) 34.850 - return -ENOMEM; 34.851 - 34.852 - o_p->enabled = 0; 34.853 - o_p->usbif_priv = usbif_find(msg->domid); 34.854 - o_p->guest_port = msg->usbif_port; 34.855 - o_p->dev_present = 0; 34.856 - o_p->guest_address = 0; /* Default address. */ 34.857 - 34.858 - strcpy(o_p->path, msg->path); 34.859 - 34.860 - spin_lock_irq(&owned_ports_lock); 34.861 - 34.862 - list_add(&o_p->list, &owned_ports); 34.863 - 34.864 - spin_unlock_irq(&owned_ports_lock); 34.865 - 34.866 - printk(KERN_INFO "usbback: Claimed USB port (%s) for %d.%d\n", o_p->path, 34.867 - msg->domid, msg->usbif_port); 34.868 - 34.869 - /* Force a reprobe for unclaimed devices. */ 34.870 - usb_scan_devices(); 34.871 - 34.872 - return 0; 34.873 -} 34.874 - 34.875 -owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req) 34.876 -{ 34.877 - unsigned long flags; 34.878 - struct list_head *port; 34.879 - 34.880 - /* I'm assuming this is not called from IRQ context - correct? I think 34.881 - * it's probably only called in response to control messages or plug events 34.882 - * in the USB hub kernel thread, so should be OK. 
*/ 34.883 - spin_lock_irqsave(&owned_ports_lock, flags); 34.884 - list_for_each(port, &owned_ports) 34.885 - { 34.886 - owned_port_t *p = list_entry(port, owned_port_t, list); 34.887 - if(p->usbif_priv == up && p->guest_address == req->devnum && p->enabled ) 34.888 - { 34.889 - dump_port(p); 34.890 - 34.891 - spin_unlock_irqrestore(&owned_ports_lock, flags); 34.892 - return p; 34.893 - } 34.894 - } 34.895 - spin_unlock_irqrestore(&owned_ports_lock, flags); 34.896 - 34.897 - return NULL; 34.898 -} 34.899 - 34.900 -owned_port_t *__usbif_find_port(char *path) 34.901 -{ 34.902 - struct list_head *port; 34.903 - 34.904 - list_for_each(port, &owned_ports) 34.905 - { 34.906 - owned_port_t *p = list_entry(port, owned_port_t, list); 34.907 - if(!strcmp(path, p->path)) 34.908 - { 34.909 - return p; 34.910 - } 34.911 - } 34.912 - 34.913 - return NULL; 34.914 -} 34.915 - 34.916 -owned_port_t *usbif_find_port(char *path) 34.917 -{ 34.918 - owned_port_t *ret; 34.919 - unsigned long flags; 34.920 - 34.921 - spin_lock_irqsave(&owned_ports_lock, flags); 34.922 - ret = __usbif_find_port(path); 34.923 - spin_unlock_irqrestore(&owned_ports_lock, flags); 34.924 - 34.925 - return ret; 34.926 -} 34.927 - 34.928 - 34.929 -static void *probe(struct usb_device *dev, unsigned iface, 34.930 - const struct usb_device_id *id) 34.931 -{ 34.932 - owned_port_t *p; 34.933 - 34.934 - /* We don't care what the device is - if we own the port, we want it. We 34.935 - * don't deal with device-specifics in this driver, so we don't care what 34.936 - * the device actually is ;-) */ 34.937 - if ( ( p = usbif_find_port(dev->devpath) ) != NULL ) 34.938 - { 34.939 - printk(KERN_INFO "usbback: claimed device attached to owned port\n"); 34.940 - 34.941 - p->dev_present = 1; 34.942 - p->dev = dev; 34.943 - set_bit(iface, &p->ifaces); 34.944 - 34.945 - return p->usbif_priv; 34.946 - } 34.947 - else 34.948 - printk(KERN_INFO "usbback: hotplug for non-owned port (%s), ignoring\n", 34.949 - dev->devpath); 34.950 - 34.951 - 34.952 - return NULL; 34.953 -} 34.954 - 34.955 -static void disconnect(struct usb_device *dev, void *usbif) 34.956 -{ 34.957 - /* Note the device is removed so we can tell the guest when it probes. */ 34.958 - owned_port_t *port = usbif_find_port(dev->devpath); 34.959 - port->dev_present = 0; 34.960 - port->dev = NULL; 34.961 - port->ifaces = 0; 34.962 -} 34.963 - 34.964 - 34.965 -struct usb_driver driver = 34.966 -{ 34.967 - .owner = THIS_MODULE, 34.968 - .name = "Xen USB Backend", 34.969 - .probe = probe, 34.970 - .disconnect = disconnect, 34.971 - .id_table = NULL, 34.972 -}; 34.973 - 34.974 -/* __usbif_release_port - internal mechanics for releasing a port */ 34.975 -void __usbif_release_port(owned_port_t *p) 34.976 -{ 34.977 - int i; 34.978 - 34.979 - for ( i = 0; p->ifaces != 0; i++) 34.980 - if ( p->ifaces & 1 << i ) 34.981 - { 34.982 - usb_driver_release_interface(&driver, usb_ifnum_to_if(p->dev, i)); 34.983 - clear_bit(i, &p->ifaces); 34.984 - } 34.985 - list_del(&p->list); 34.986 - 34.987 - /* Reset the real device. We don't simulate disconnect / probe for other 34.988 - * drivers in this kernel because we assume the device is completely under 34.989 - * the control of ourselves (i.e. the guest!). This should ensure that the 34.990 - * device is in a sane state for the next customer ;-) */ 34.991 - 34.992 - /* MAW NB: we're not resetting the real device here. This looks perfectly 34.993 - * valid to me but it causes memory corruption. 
We seem to get away with not 34.994 - * resetting for now, although it'd be nice to have this tracked down. */ 34.995 -/* if ( p->dev != NULL) */ 34.996 -/* usb_reset_device(p->dev); */ 34.997 - 34.998 - kfree(p); 34.999 -} 34.1000 - 34.1001 - 34.1002 -/** 34.1003 - * usbif_release_port - stop claiming devices on a port on behalf of guest 34.1004 - */ 34.1005 -void usbif_release_port(usbif_be_release_port_t *msg) 34.1006 -{ 34.1007 - owned_port_t *p; 34.1008 - 34.1009 - spin_lock_irq(&owned_ports_lock); 34.1010 - p = __usbif_find_port(msg->path); 34.1011 - __usbif_release_port(p); 34.1012 - spin_unlock_irq(&owned_ports_lock); 34.1013 -} 34.1014 - 34.1015 -void usbif_release_ports(usbif_priv_t *up) 34.1016 -{ 34.1017 - struct list_head *port, *tmp; 34.1018 - unsigned long flags; 34.1019 - 34.1020 - spin_lock_irqsave(&owned_ports_lock, flags); 34.1021 - list_for_each_safe(port, tmp, &owned_ports) 34.1022 - { 34.1023 - owned_port_t *p = list_entry(port, owned_port_t, list); 34.1024 - if ( p->usbif_priv == up ) 34.1025 - __usbif_release_port(p); 34.1026 - } 34.1027 - spin_unlock_irqrestore(&owned_ports_lock, flags); 34.1028 -} 34.1029 - 34.1030 -static int __init usbif_init(void) 34.1031 -{ 34.1032 - int i; 34.1033 - struct page *page; 34.1034 - 34.1035 - if ( !(xen_start_info->flags & SIF_INITDOMAIN) && 34.1036 - !(xen_start_info->flags & SIF_USB_BE_DOMAIN) ) 34.1037 - return 0; 34.1038 - 34.1039 - page = balloon_alloc_empty_page_range(MMAP_PAGES); 34.1040 - BUG_ON(page == NULL); 34.1041 - mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); 34.1042 - 34.1043 - pending_cons = 0; 34.1044 - pending_prod = MAX_PENDING_REQS; 34.1045 - memset(pending_reqs, 0, sizeof(pending_reqs)); 34.1046 - for ( i = 0; i < MAX_PENDING_REQS; i++ ) 34.1047 - pending_ring[i] = i; 34.1048 - 34.1049 - spin_lock_init(&pend_prod_lock); 34.1050 - 34.1051 - spin_lock_init(&owned_ports_lock); 34.1052 - INIT_LIST_HEAD(&owned_ports); 34.1053 - 34.1054 - spin_lock_init(&usbio_schedule_list_lock); 34.1055 - INIT_LIST_HEAD(&usbio_schedule_list); 34.1056 - 34.1057 - if ( kernel_thread(usbio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 ) 34.1058 - BUG(); 34.1059 - 34.1060 - usbif_interface_init(); 34.1061 - 34.1062 - usbif_ctrlif_init(); 34.1063 - 34.1064 - usb_register(&driver); 34.1065 - 34.1066 - printk(KERN_INFO "Xen USB Backend Initialised"); 34.1067 - 34.1068 - return 0; 34.1069 -} 34.1070 - 34.1071 -__initcall(usbif_init);
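Both drivers removed by this changeset talk over a single shared ring between frontend and backend. In the backend above, every guest request is completed through make_response(): take the per-interface ring lock, fill the next usbif_response_t slot, advance the private response producer, push, and kick the guest's event channel. The sketch below condenses that producer pattern for reference; it assumes the usbif ring types and macros declared in the deleted driver's headers, and the function name usbback_respond is invented for illustration only.

/*
 * Minimal sketch of the backend's response path, assuming the usbif
 * shared-ring types/macros (usbif_back_ring_t, usbif_response_t,
 * RING_GET_RESPONSE, RING_PUSH_RESPONSES) and the usbif_priv_t layout
 * used by the deleted usbback.c above.  The function name is invented
 * for illustration; the driver's own entry point is make_response().
 */
static void usbback_respond(usbif_priv_t *up, unsigned long id,
                            unsigned short op, int status,
                            int inband, unsigned long length)
{
    usbif_back_ring_t *ring = &up->usb_ring;
    usbif_response_t *resp;
    unsigned long flags;

    spin_lock_irqsave(&up->usb_ring_lock, flags);

    /* Claim the next free slot in the shared response ring. */
    resp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
    resp->id        = id;       /* echo of the guest-chosen request id      */
    resp->operation = op;       /* USBIF_OP_IO / USBIF_OP_PROBE / _RESET    */
    resp->status    = status;   /* 0 on success, negative errno on failure  */
    resp->data      = inband;   /* small in-band result (e.g. probe status) */
    resp->length    = length;   /* bytes actually transferred               */

    wmb();                      /* publish the fields before the index      */
    ring->rsp_prod_pvt++;
    RING_PUSH_RESPONSES(ring);  /* expose rsp_prod to the frontend          */

    spin_unlock_irqrestore(&up->usb_ring_lock, flags);

    notify_via_evtchn(up->evtchn);  /* kick the guest's event channel       */
}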
35.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbfront/usbfront.c Thu Sep 22 16:05:44 2005 +0100 35.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 35.3 @@ -1,1735 +0,0 @@ 35.4 -/* 35.5 - * Xen Virtual USB Frontend Driver 35.6 - * 35.7 - * This file contains the first version of the Xen virtual USB hub 35.8 - * that I've managed not to delete by mistake (3rd time lucky!). 35.9 - * 35.10 - * Based on Linux's uhci.c, original copyright notices are displayed 35.11 - * below. Portions also (c) 2004 Intel Research Cambridge 35.12 - * and (c) 2004, 2005 Mark Williamson 35.13 - * 35.14 - * Contact <mark.williamson@cl.cam.ac.uk> or 35.15 - * <xen-devel@lists.sourceforge.net> regarding this code. 35.16 - * 35.17 - * Still to be (maybe) implemented: 35.18 - * - migration / backend restart support? 35.19 - * - support for building / using as a module 35.20 - */ 35.21 - 35.22 -/* 35.23 - * Universal Host Controller Interface driver for USB. 35.24 - * 35.25 - * Maintainer: Johannes Erdfelt <johannes@erdfelt.com> 35.26 - * 35.27 - * (C) Copyright 1999 Linus Torvalds 35.28 - * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com 35.29 - * (C) Copyright 1999 Randy Dunlap 35.30 - * (C) Copyright 1999 Georg Acher, acher@in.tum.de 35.31 - * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de 35.32 - * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch 35.33 - * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at 35.34 - * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface 35.35 - * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). 35.36 - * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) 35.37 - * 35.38 - * Intel documents this fairly well, and as far as I know there 35.39 - * are no royalties or anything like that, but even so there are 35.40 - * people who decided that they want to do the same thing in a 35.41 - * completely different way. 35.42 - * 35.43 - * WARNING! The USB documentation is downright evil. Most of it 35.44 - * is just crap, written by a committee. 
You're better off ignoring 35.45 - * most of it, the important stuff is: 35.46 - * - the low-level protocol (fairly simple but lots of small details) 35.47 - * - working around the horridness of the rest 35.48 - */ 35.49 - 35.50 -#include <linux/config.h> 35.51 -#include <linux/module.h> 35.52 -#include <linux/kernel.h> 35.53 -#include <linux/init.h> 35.54 -#include <linux/sched.h> 35.55 -#include <linux/delay.h> 35.56 -#include <linux/slab.h> 35.57 -#include <linux/smp_lock.h> 35.58 -#include <linux/errno.h> 35.59 -#include <linux/interrupt.h> 35.60 -#include <linux/spinlock.h> 35.61 -#ifdef CONFIG_USB_DEBUG 35.62 -#define DEBUG 35.63 -#else 35.64 -#undef DEBUG 35.65 -#endif 35.66 -#include <linux/usb.h> 35.67 - 35.68 -#include <asm/irq.h> 35.69 -#include <asm/system.h> 35.70 - 35.71 -#include "xhci.h" 35.72 - 35.73 -#include "../../../../../drivers/usb/hcd.h" 35.74 - 35.75 -#include <asm-xen/xen-public/io/usbif.h> 35.76 -#include <asm/xen-public/io/domain_controller.h> 35.77 - 35.78 -/* 35.79 - * Version Information 35.80 - */ 35.81 -#define DRIVER_VERSION "v1.0" 35.82 -#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, " \ 35.83 - "Randy Dunlap, Georg Acher, Deti Fliegl, " \ 35.84 - "Thomas Sailer, Roman Weissgaerber, Mark Williamson" 35.85 -#define DRIVER_DESC "Xen Virtual USB Host Controller Interface" 35.86 - 35.87 -/* 35.88 - * debug = 0, no debugging messages 35.89 - * debug = 1, dump failed URB's except for stalls 35.90 - * debug = 2, dump all failed URB's (including stalls) 35.91 - */ 35.92 -#ifdef DEBUG 35.93 -static int debug = 1; 35.94 -#else 35.95 -static int debug = 0; 35.96 -#endif 35.97 -MODULE_PARM(debug, "i"); 35.98 -MODULE_PARM_DESC(debug, "Debug level"); 35.99 -static char *errbuf; 35.100 -#define ERRBUF_LEN (PAGE_SIZE * 8) 35.101 - 35.102 -static int rh_submit_urb(struct urb *urb); 35.103 -static int rh_unlink_urb(struct urb *urb); 35.104 -static int xhci_unlink_urb(struct urb *urb); 35.105 -static void xhci_call_completion(struct urb *urb); 35.106 -static void xhci_drain_ring(void); 35.107 -static void xhci_transfer_result(struct xhci *xhci, struct urb *urb); 35.108 -static void xhci_finish_completion(void); 35.109 - 35.110 -#define MAX_URB_LOOP 2048 /* Maximum number of linked URB's */ 35.111 - 35.112 -static kmem_cache_t *xhci_up_cachep; /* urb_priv cache */ 35.113 -static struct xhci *xhci; /* XHCI structure for the interface */ 35.114 - 35.115 -/****************************************************************************** 35.116 - * DEBUGGING 35.117 - */ 35.118 - 35.119 -#ifdef DEBUG 35.120 - 35.121 -static void dump_urb(struct urb *urb) 35.122 -{ 35.123 - printk(KERN_DEBUG "dumping urb @ %p\n" 35.124 - " hcpriv = %p\n" 35.125 - " next = %p\n" 35.126 - " dev = %p\n" 35.127 - " pipe = 0x%lx\n" 35.128 - " status = %d\n" 35.129 - " transfer_flags = 0x%lx\n" 35.130 - " transfer_buffer = %p\n" 35.131 - " transfer_buffer_length = %d\n" 35.132 - " actual_length = %d\n" 35.133 - " bandwidth = %d\n" 35.134 - " setup_packet = %p\n", 35.135 - urb, urb->hcpriv, urb->next, urb->dev, urb->pipe, urb->status, 35.136 - urb->transfer_flags, urb->transfer_buffer, 35.137 - urb->transfer_buffer_length, urb->actual_length, urb->bandwidth, 35.138 - urb->setup_packet); 35.139 - if ( urb->setup_packet != NULL ) 35.140 - printk(KERN_DEBUG 35.141 - "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n", 35.142 - urb->setup_packet[0], urb->setup_packet[1], 35.143 - urb->setup_packet[2], urb->setup_packet[3], 35.144 - urb->setup_packet[4], urb->setup_packet[5], 
35.145 - urb->setup_packet[6], urb->setup_packet[7]); 35.146 - printk(KERN_DEBUG "complete = %p\n" 35.147 - "interval = %d\n", urb->complete, urb->interval); 35.148 - 35.149 -} 35.150 - 35.151 -static void xhci_show_resp(usbif_response_t *r) 35.152 -{ 35.153 - printk(KERN_DEBUG "dumping response @ %p\n" 35.154 - " id=0x%lx\n" 35.155 - " op=0x%x\n" 35.156 - " data=0x%x\n" 35.157 - " status=0x%x\n" 35.158 - " length=0x%lx\n", 35.159 - r->id, r->operation, r->data, r->status, r->length); 35.160 -} 35.161 - 35.162 -#define DPRINK(...) printk(KERN_DEBUG __VA_ARGS__) 35.163 - 35.164 -#else /* DEBUG */ 35.165 - 35.166 -#define dump_urb(blah) ((void)0) 35.167 -#define xhci_show_resp(blah) ((void)0) 35.168 -#define DPRINTK(blah,...) ((void)0) 35.169 - 35.170 -#endif /* DEBUG */ 35.171 - 35.172 -/****************************************************************************** 35.173 - * RING REQUEST HANDLING 35.174 - */ 35.175 - 35.176 -#define RING_PLUGGED(_hc) ( RING_FULL(&_hc->usb_ring) || _hc->recovery ) 35.177 - 35.178 -/** 35.179 - * xhci_construct_isoc - add isochronous information to a request 35.180 - */ 35.181 -static int xhci_construct_isoc(usbif_request_t *req, struct urb *urb) 35.182 -{ 35.183 - usbif_iso_t *schedule; 35.184 - int i; 35.185 - struct urb_priv *urb_priv = urb->hcpriv; 35.186 - 35.187 - req->num_iso = urb->number_of_packets; 35.188 - schedule = (usbif_iso_t *)__get_free_page(GFP_KERNEL); 35.189 - 35.190 - if ( schedule == NULL ) 35.191 - return -ENOMEM; 35.192 - 35.193 - for ( i = 0; i < req->num_iso; i++ ) 35.194 - { 35.195 - schedule[i].buffer_offset = urb->iso_frame_desc[i].offset; 35.196 - schedule[i].length = urb->iso_frame_desc[i].length; 35.197 - } 35.198 - 35.199 - urb_priv->schedule = schedule; 35.200 - req->iso_schedule = virt_to_mfn(schedule) << PAGE_SHIFT; 35.201 - 35.202 - return 0; 35.203 -} 35.204 - 35.205 -/** 35.206 - * xhci_queue_req - construct and queue request for an URB 35.207 - */ 35.208 -static int xhci_queue_req(struct urb *urb) 35.209 -{ 35.210 - unsigned long flags; 35.211 - usbif_request_t *req; 35.212 - usbif_front_ring_t *usb_ring = &xhci->usb_ring; 35.213 - 35.214 -#if DEBUG 35.215 - printk(KERN_DEBUG 35.216 - "usbif = %p, req_prod = %d (@ 0x%lx), resp_prod = %d, resp_cons = %d\n", 35.217 - usbif, usbif->req_prod, virt_to_mfn(&usbif->req_prod), 35.218 - usbif->resp_prod, xhci->usb_resp_cons); 35.219 -#endif 35.220 - 35.221 - spin_lock_irqsave(&xhci->ring_lock, flags); 35.222 - 35.223 - if ( RING_PLUGGED(xhci) ) 35.224 - { 35.225 - printk(KERN_WARNING 35.226 - "xhci_queue_req(): USB ring plugged, not queuing request\n"); 35.227 - spin_unlock_irqrestore(&xhci->ring_lock, flags); 35.228 - return -ENOBUFS; 35.229 - } 35.230 - 35.231 - /* Stick something in the shared communications ring. */ 35.232 - req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt); 35.233 - 35.234 - req->operation = USBIF_OP_IO; 35.235 - req->port = 0; /* We don't care what the port is. 
*/ 35.236 - req->id = (unsigned long) urb->hcpriv; 35.237 - req->transfer_buffer = virt_to_mfn(urb->transfer_buffer) << PAGE_SHIFT; 35.238 - req->devnum = usb_pipedevice(urb->pipe); 35.239 - req->direction = usb_pipein(urb->pipe); 35.240 - req->speed = usb_pipeslow(urb->pipe); 35.241 - req->pipe_type = usb_pipetype(urb->pipe); 35.242 - req->length = urb->transfer_buffer_length; 35.243 - req->transfer_flags = urb->transfer_flags; 35.244 - req->endpoint = usb_pipeendpoint(urb->pipe); 35.245 - req->speed = usb_pipeslow(urb->pipe); 35.246 - req->timeout = urb->timeout * (1000 / HZ); 35.247 - 35.248 - if ( usb_pipetype(urb->pipe) == 0 ) /* ISO */ 35.249 - { 35.250 - int ret = xhci_construct_isoc(req, urb); 35.251 - if ( ret != 0 ) 35.252 - return ret; 35.253 - } 35.254 - 35.255 - if(urb->setup_packet != NULL) 35.256 - memcpy(req->setup, urb->setup_packet, 8); 35.257 - else 35.258 - memset(req->setup, 0, 8); 35.259 - 35.260 - usb_ring->req_prod_pvt++; 35.261 - RING_PUSH_REQUESTS(usb_ring); 35.262 - 35.263 - spin_unlock_irqrestore(&xhci->ring_lock, flags); 35.264 - 35.265 - notify_via_evtchn(xhci->evtchn); 35.266 - 35.267 - DPRINTK("Queued request for an URB.\n"); 35.268 - dump_urb(urb); 35.269 - 35.270 - return -EINPROGRESS; 35.271 -} 35.272 - 35.273 -/** 35.274 - * xhci_queue_probe - queue a probe request for a particular port 35.275 - */ 35.276 -static inline usbif_request_t *xhci_queue_probe(usbif_vdev_t port) 35.277 -{ 35.278 - usbif_request_t *req; 35.279 - usbif_front_ring_t *usb_ring = &xhci->usb_ring; 35.280 - 35.281 -#if DEBUG 35.282 - printk(KERN_DEBUG 35.283 - "queuing probe: req_prod = %d (@ 0x%lx), resp_prod = %d, " 35.284 - "resp_cons = %d\n", usbif->req_prod, 35.285 - virt_to_mfn(&usbif->req_prod), 35.286 - usbif->resp_prod, xhci->usb_resp_cons); 35.287 -#endif 35.288 - 35.289 - /* This is always called from the timer interrupt. */ 35.290 - spin_lock(&xhci->ring_lock); 35.291 - 35.292 - if ( RING_PLUGGED(xhci) ) 35.293 - { 35.294 - printk(KERN_WARNING 35.295 - "xhci_queue_probe(): ring full, not queuing request\n"); 35.296 - spin_unlock(&xhci->ring_lock); 35.297 - return NULL; 35.298 - } 35.299 - 35.300 - /* Stick something in the shared communications ring. */ 35.301 - req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt); 35.302 - 35.303 - memset(req, 0, sizeof(*req)); 35.304 - 35.305 - req->operation = USBIF_OP_PROBE; 35.306 - req->port = port; 35.307 - 35.308 - usb_ring->req_prod_pvt++; 35.309 - RING_PUSH_REQUESTS(usb_ring); 35.310 - 35.311 - spin_unlock(&xhci->ring_lock); 35.312 - 35.313 - notify_via_evtchn(xhci->evtchn); 35.314 - 35.315 - return req; 35.316 -} 35.317 - 35.318 -/** 35.319 - * xhci_port_reset - queue a reset request for a particular port 35.320 - */ 35.321 -static int xhci_port_reset(usbif_vdev_t port) 35.322 -{ 35.323 - usbif_request_t *req; 35.324 - usbif_front_ring_t *usb_ring = &xhci->usb_ring; 35.325 - 35.326 - /* Only ever happens from process context (hub thread). */ 35.327 - spin_lock_irq(&xhci->ring_lock); 35.328 - 35.329 - if ( RING_PLUGGED(xhci) ) 35.330 - { 35.331 - printk(KERN_WARNING 35.332 - "xhci_port_reset(): ring plugged, not queuing request\n"); 35.333 - spin_unlock_irq(&xhci->ring_lock); 35.334 - return -ENOBUFS; 35.335 - } 35.336 - 35.337 - /* We only reset one port at a time, so we only need one variable per 35.338 - * hub. */ 35.339 - xhci->awaiting_reset = 1; 35.340 - 35.341 - /* Stick something in the shared communications ring. 
*/ 35.342 - req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt); 35.343 - 35.344 - memset(req, 0, sizeof(*req)); 35.345 - 35.346 - req->operation = USBIF_OP_RESET; 35.347 - req->port = port; 35.348 - 35.349 - usb_ring->req_prod_pvt++; 35.350 - RING_PUSH_REQUESTS(usb_ring); 35.351 - 35.352 - spin_unlock_irq(&xhci->ring_lock); 35.353 - 35.354 - notify_via_evtchn(xhci->evtchn); 35.355 - 35.356 - while ( xhci->awaiting_reset > 0 ) 35.357 - { 35.358 - mdelay(1); 35.359 - xhci_drain_ring(); 35.360 - } 35.361 - 35.362 - xhci->rh.ports[port].pe = 1; 35.363 - xhci->rh.ports[port].pe_chg = 1; 35.364 - 35.365 - return xhci->awaiting_reset; 35.366 -} 35.367 - 35.368 - 35.369 -/****************************************************************************** 35.370 - * RING RESPONSE HANDLING 35.371 - */ 35.372 - 35.373 -static void receive_usb_reset(usbif_response_t *resp) 35.374 -{ 35.375 - xhci->awaiting_reset = resp->status; 35.376 - rmb(); 35.377 - 35.378 -} 35.379 - 35.380 -static void receive_usb_probe(usbif_response_t *resp) 35.381 -{ 35.382 - spin_lock(&xhci->rh.port_state_lock); 35.383 - 35.384 - if ( resp->status >= 0 ) 35.385 - { 35.386 - if ( resp->status == 1 ) 35.387 - { 35.388 - /* If theres a device there and there wasn't one before there must 35.389 - * have been a connection status change. */ 35.390 - if( xhci->rh.ports[resp->data].cs == 0 ) 35.391 - { 35.392 - xhci->rh.ports[resp->data].cs = 1; 35.393 - xhci->rh.ports[resp->data].cs_chg = 1; 35.394 - } 35.395 - } 35.396 - else if ( resp->status == 0 ) 35.397 - { 35.398 - if(xhci->rh.ports[resp->data].cs == 1 ) 35.399 - { 35.400 - xhci->rh.ports[resp->data].cs = 0; 35.401 - xhci->rh.ports[resp->data].cs_chg = 1; 35.402 - xhci->rh.ports[resp->data].pe = 0; 35.403 - /* According to USB Spec v2.0, 11.24.2.7.2.2, we don't need 35.404 - * to set pe_chg since an error has not occurred. */ 35.405 - } 35.406 - } 35.407 - else 35.408 - printk(KERN_WARNING "receive_usb_probe(): unexpected status %d " 35.409 - "for port %d\n", resp->status, resp->data); 35.410 - } 35.411 - else if ( resp->status < 0) 35.412 - printk(KERN_WARNING "receive_usb_probe(): got error status %d\n", 35.413 - resp->status); 35.414 - 35.415 - spin_unlock(&xhci->rh.port_state_lock); 35.416 -} 35.417 - 35.418 -static void receive_usb_io(usbif_response_t *resp) 35.419 -{ 35.420 - struct urb_priv *urbp = (struct urb_priv *)resp->id; 35.421 - struct urb *urb = urbp->urb; 35.422 - 35.423 - urb->actual_length = resp->length; 35.424 - urbp->in_progress = 0; 35.425 - 35.426 - if( usb_pipetype(urb->pipe) == 0 ) /* ISO */ 35.427 - { 35.428 - int i; 35.429 - 35.430 - /* Copy ISO schedule results back in. */ 35.431 - for ( i = 0; i < urb->number_of_packets; i++ ) 35.432 - { 35.433 - urb->iso_frame_desc[i].status 35.434 - = urbp->schedule[i].status; 35.435 - urb->iso_frame_desc[i].actual_length 35.436 - = urbp->schedule[i].length; 35.437 - } 35.438 - free_page((unsigned long)urbp->schedule); 35.439 - } 35.440 - 35.441 - /* Only set status if it's not been changed since submission. It might 35.442 - * have been changed if the URB has been unlinked asynchronously, for 35.443 - * instance. 
*/ 35.444 - if ( urb->status == -EINPROGRESS ) 35.445 - urbp->status = urb->status = resp->status; 35.446 -} 35.447 - 35.448 -/** 35.449 - * xhci_drain_ring - drain responses from the ring, calling handlers 35.450 - * 35.451 - * This may be called from interrupt context when an event is received from the 35.452 - * backend domain, or sometimes in process context whilst waiting for a port 35.453 - * reset or URB completion. 35.454 - */ 35.455 -static void xhci_drain_ring(void) 35.456 -{ 35.457 - struct list_head *tmp, *head; 35.458 - usbif_front_ring_t *usb_ring = &xhci->usb_ring; 35.459 - usbif_response_t *resp; 35.460 - RING_IDX i, rp; 35.461 - 35.462 - /* Walk the ring here to get responses, updating URBs to show what 35.463 - * completed. */ 35.464 - 35.465 - rp = usb_ring->sring->rsp_prod; 35.466 - rmb(); /* Ensure we see queued requests up to 'rp'. */ 35.467 - 35.468 - /* Take items off the comms ring, taking care not to overflow. */ 35.469 - for ( i = usb_ring->rsp_cons; i != rp; i++ ) 35.470 - { 35.471 - resp = RING_GET_RESPONSE(usb_ring, i); 35.472 - 35.473 - /* May need to deal with batching and with putting a ceiling on 35.474 - the number dispatched for performance and anti-dos reasons */ 35.475 - 35.476 - xhci_show_resp(resp); 35.477 - 35.478 - switch ( resp->operation ) 35.479 - { 35.480 - case USBIF_OP_PROBE: 35.481 - receive_usb_probe(resp); 35.482 - break; 35.483 - 35.484 - case USBIF_OP_IO: 35.485 - receive_usb_io(resp); 35.486 - break; 35.487 - 35.488 - case USBIF_OP_RESET: 35.489 - receive_usb_reset(resp); 35.490 - break; 35.491 - 35.492 - default: 35.493 - printk(KERN_WARNING 35.494 - "error: unknown USB io operation response [%d]\n", 35.495 - resp->operation); 35.496 - break; 35.497 - } 35.498 - } 35.499 - 35.500 - usb_ring->rsp_cons = i; 35.501 - 35.502 - /* Walk the list of pending URB's to see which ones completed and do 35.503 - * callbacks, etc. */ 35.504 - spin_lock(&xhci->urb_list_lock); 35.505 - head = &xhci->urb_list; 35.506 - tmp = head->next; 35.507 - while (tmp != head) { 35.508 - struct urb *urb = list_entry(tmp, struct urb, urb_list); 35.509 - 35.510 - tmp = tmp->next; 35.511 - 35.512 - /* Checks the status and does all of the magic necessary */ 35.513 - xhci_transfer_result(xhci, urb); 35.514 - } 35.515 - spin_unlock(&xhci->urb_list_lock); 35.516 - 35.517 - xhci_finish_completion(); 35.518 -} 35.519 - 35.520 - 35.521 -static void xhci_interrupt(int irq, void *__xhci, struct pt_regs *regs) 35.522 -{ 35.523 - xhci_drain_ring(); 35.524 -} 35.525 - 35.526 -/****************************************************************************** 35.527 - * HOST CONTROLLER FUNCTIONALITY 35.528 - */ 35.529 - 35.530 -/** 35.531 - * no-op implementation of private device alloc / free routines 35.532 - */ 35.533 -static int xhci_do_nothing_dev(struct usb_device *dev) 35.534 -{ 35.535 - return 0; 35.536 -} 35.537 - 35.538 -static inline void xhci_add_complete(struct urb *urb) 35.539 -{ 35.540 - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; 35.541 - unsigned long flags; 35.542 - 35.543 - spin_lock_irqsave(&xhci->complete_list_lock, flags); 35.544 - list_add_tail(&urbp->complete_list, &xhci->complete_list); 35.545 - spin_unlock_irqrestore(&xhci->complete_list_lock, flags); 35.546 -} 35.547 - 35.548 -/* When this returns, the owner of the URB may free its 35.549 - * storage. 35.550 - * 35.551 - * We spin and wait for the URB to complete before returning. 35.552 - * 35.553 - * Call with urb->lock acquired. 
35.554 - */ 35.555 -static void xhci_delete_urb(struct urb *urb) 35.556 -{ 35.557 - struct urb_priv *urbp; 35.558 - 35.559 - urbp = urb->hcpriv; 35.560 - 35.561 - /* If there's no urb_priv structure for this URB then it can't have 35.562 - * been submitted at all. */ 35.563 - if ( urbp == NULL ) 35.564 - return; 35.565 - 35.566 - /* For now we just spin until the URB completes. It shouldn't take too 35.567 - * long and we don't expect to have to do this very often. */ 35.568 - while ( urb->status == -EINPROGRESS ) 35.569 - { 35.570 - xhci_drain_ring(); 35.571 - mdelay(1); 35.572 - } 35.573 - 35.574 - /* Now we know that further transfers to the buffer won't 35.575 - * occur, so we can safely return. */ 35.576 -} 35.577 - 35.578 -static struct urb_priv *xhci_alloc_urb_priv(struct urb *urb) 35.579 -{ 35.580 - struct urb_priv *urbp; 35.581 - 35.582 - urbp = kmem_cache_alloc(xhci_up_cachep, SLAB_ATOMIC); 35.583 - if (!urbp) { 35.584 - err("xhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n"); 35.585 - return NULL; 35.586 - } 35.587 - 35.588 - memset((void *)urbp, 0, sizeof(*urbp)); 35.589 - 35.590 - urbp->inserttime = jiffies; 35.591 - urbp->urb = urb; 35.592 - urbp->dev = urb->dev; 35.593 - 35.594 - INIT_LIST_HEAD(&urbp->complete_list); 35.595 - 35.596 - urb->hcpriv = urbp; 35.597 - 35.598 - return urbp; 35.599 -} 35.600 - 35.601 -/* 35.602 - * MUST be called with urb->lock acquired 35.603 - */ 35.604 -/* When is this called? Do we need to stop the transfer (as we 35.605 - * currently do)? */ 35.606 -static void xhci_destroy_urb_priv(struct urb *urb) 35.607 -{ 35.608 - struct urb_priv *urbp; 35.609 - 35.610 - urbp = (struct urb_priv *)urb->hcpriv; 35.611 - if (!urbp) 35.612 - return; 35.613 - 35.614 - if (!list_empty(&urb->urb_list)) 35.615 - warn("xhci_destroy_urb_priv: urb %p still on xhci->urb_list", urb); 35.616 - 35.617 - if (!list_empty(&urbp->complete_list)) 35.618 - warn("xhci_destroy_urb_priv: urb %p still on xhci->complete_list", urb); 35.619 - 35.620 - kmem_cache_free(xhci_up_cachep, urb->hcpriv); 35.621 - 35.622 - urb->hcpriv = NULL; 35.623 -} 35.624 - 35.625 -/** 35.626 - * Try to find URBs in progress on the same pipe to the same device. 
35.627 - * 35.628 - * MUST be called with xhci->urb_list_lock acquired 35.629 - */ 35.630 -static struct urb *xhci_find_urb_ep(struct xhci *xhci, struct urb *urb) 35.631 -{ 35.632 - struct list_head *tmp, *head; 35.633 - 35.634 - /* We don't match Isoc transfers since they are special */ 35.635 - if (usb_pipeisoc(urb->pipe)) 35.636 - return NULL; 35.637 - 35.638 - head = &xhci->urb_list; 35.639 - tmp = head->next; 35.640 - while (tmp != head) { 35.641 - struct urb *u = list_entry(tmp, struct urb, urb_list); 35.642 - 35.643 - tmp = tmp->next; 35.644 - 35.645 - if (u->dev == urb->dev && u->pipe == urb->pipe && 35.646 - u->status == -EINPROGRESS) 35.647 - return u; 35.648 - } 35.649 - 35.650 - return NULL; 35.651 -} 35.652 - 35.653 -static int xhci_submit_urb(struct urb *urb) 35.654 -{ 35.655 - int ret = -EINVAL; 35.656 - unsigned long flags; 35.657 - struct urb *eurb; 35.658 - int bustime; 35.659 - 35.660 - DPRINTK("URB submitted to XHCI driver.\n"); 35.661 - dump_urb(urb); 35.662 - 35.663 - if (!urb) 35.664 - return -EINVAL; 35.665 - 35.666 - if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv) { 35.667 - warn("xhci_submit_urb: urb %p belongs to disconnected device or bus?", urb); 35.668 - return -ENODEV; 35.669 - } 35.670 - 35.671 - if ( urb->dev->devpath == NULL ) 35.672 - BUG(); 35.673 - 35.674 - usb_inc_dev_use(urb->dev); 35.675 - 35.676 - spin_lock_irqsave(&xhci->urb_list_lock, flags); 35.677 - spin_lock(&urb->lock); 35.678 - 35.679 - if (urb->status == -EINPROGRESS || urb->status == -ECONNRESET || 35.680 - urb->status == -ECONNABORTED) { 35.681 - dbg("xhci_submit_urb: urb not available to submit (status = %d)", urb->status); 35.682 - /* Since we can have problems on the out path */ 35.683 - spin_unlock(&urb->lock); 35.684 - spin_unlock_irqrestore(&xhci->urb_list_lock, flags); 35.685 - usb_dec_dev_use(urb->dev); 35.686 - 35.687 - return ret; 35.688 - } 35.689 - 35.690 - INIT_LIST_HEAD(&urb->urb_list); 35.691 - if (!xhci_alloc_urb_priv(urb)) { 35.692 - ret = -ENOMEM; 35.693 - 35.694 - goto out; 35.695 - } 35.696 - 35.697 - ( (struct urb_priv *)urb->hcpriv )->in_progress = 1; 35.698 - 35.699 - eurb = xhci_find_urb_ep(xhci, urb); 35.700 - if (eurb && !(urb->transfer_flags & USB_QUEUE_BULK)) { 35.701 - ret = -ENXIO; 35.702 - 35.703 - goto out; 35.704 - } 35.705 - 35.706 - /* Short circuit the virtual root hub */ 35.707 - if (urb->dev == xhci->rh.dev) { 35.708 - ret = rh_submit_urb(urb); 35.709 - 35.710 - goto out; 35.711 - } 35.712 - 35.713 - switch (usb_pipetype(urb->pipe)) { 35.714 - case PIPE_CONTROL: 35.715 - case PIPE_BULK: 35.716 - ret = xhci_queue_req(urb); 35.717 - break; 35.718 - 35.719 - case PIPE_INTERRUPT: 35.720 - if (urb->bandwidth == 0) { /* not yet checked/allocated */ 35.721 - bustime = usb_check_bandwidth(urb->dev, urb); 35.722 - if (bustime < 0) 35.723 - ret = bustime; 35.724 - else { 35.725 - ret = xhci_queue_req(urb); 35.726 - if (ret == -EINPROGRESS) 35.727 - usb_claim_bandwidth(urb->dev, urb, 35.728 - bustime, 0); 35.729 - } 35.730 - } else /* bandwidth is already set */ 35.731 - ret = xhci_queue_req(urb); 35.732 - break; 35.733 - 35.734 - case PIPE_ISOCHRONOUS: 35.735 - if (urb->bandwidth == 0) { /* not yet checked/allocated */ 35.736 - if (urb->number_of_packets <= 0) { 35.737 - ret = -EINVAL; 35.738 - break; 35.739 - } 35.740 - bustime = usb_check_bandwidth(urb->dev, urb); 35.741 - if (bustime < 0) { 35.742 - ret = bustime; 35.743 - break; 35.744 - } 35.745 - 35.746 - ret = xhci_queue_req(urb); 35.747 - if (ret == -EINPROGRESS) 35.748 - 
usb_claim_bandwidth(urb->dev, urb, bustime, 1); 35.749 - } else /* bandwidth is already set */ 35.750 - ret = xhci_queue_req(urb); 35.751 - break; 35.752 - } 35.753 -out: 35.754 - urb->status = ret; 35.755 - 35.756 - if (ret == -EINPROGRESS) { 35.757 - /* We use _tail to make find_urb_ep more efficient */ 35.758 - list_add_tail(&urb->urb_list, &xhci->urb_list); 35.759 - 35.760 - spin_unlock(&urb->lock); 35.761 - spin_unlock_irqrestore(&xhci->urb_list_lock, flags); 35.762 - 35.763 - return 0; 35.764 - } 35.765 - 35.766 - xhci_delete_urb(urb); 35.767 - 35.768 - spin_unlock(&urb->lock); 35.769 - spin_unlock_irqrestore(&xhci->urb_list_lock, flags); 35.770 - 35.771 - /* Only call completion if it was successful */ 35.772 - if (!ret) 35.773 - xhci_call_completion(urb); 35.774 - 35.775 - return ret; 35.776 -} 35.777 - 35.778 -/* 35.779 - * Return the result of a transfer 35.780 - * 35.781 - * MUST be called with urb_list_lock acquired 35.782 - */ 35.783 -static void xhci_transfer_result(struct xhci *xhci, struct urb *urb) 35.784 -{ 35.785 - int ret = 0; 35.786 - unsigned long flags; 35.787 - struct urb_priv *urbp; 35.788 - 35.789 - /* The root hub is special */ 35.790 - if (urb->dev == xhci->rh.dev) 35.791 - return; 35.792 - 35.793 - spin_lock_irqsave(&urb->lock, flags); 35.794 - 35.795 - urbp = (struct urb_priv *)urb->hcpriv; 35.796 - 35.797 - if ( ( (struct urb_priv *)urb->hcpriv )->in_progress ) 35.798 - ret = -EINPROGRESS; 35.799 - 35.800 - if (urb->actual_length < urb->transfer_buffer_length) { 35.801 - if (urb->transfer_flags & USB_DISABLE_SPD) { 35.802 - ret = -EREMOTEIO; 35.803 - } 35.804 - } 35.805 - 35.806 - if (urb->status == -EPIPE) 35.807 - { 35.808 - ret = urb->status; 35.809 - /* endpoint has stalled - mark it halted */ 35.810 - usb_endpoint_halt(urb->dev, usb_pipeendpoint(urb->pipe), 35.811 - usb_pipeout(urb->pipe)); 35.812 - } 35.813 - 35.814 - if ((debug == 1 && ret != 0 && ret != -EPIPE) || 35.815 - (ret != 0 && debug > 1)) { 35.816 - /* Some debugging code */ 35.817 - dbg("xhci_result_interrupt/bulk() failed with status %x", 35.818 - status); 35.819 - } 35.820 - 35.821 - if (ret == -EINPROGRESS) 35.822 - goto out; 35.823 - 35.824 - switch (usb_pipetype(urb->pipe)) { 35.825 - case PIPE_CONTROL: 35.826 - case PIPE_BULK: 35.827 - case PIPE_ISOCHRONOUS: 35.828 - /* Release bandwidth for Interrupt or Isoc. transfers */ 35.829 - /* Spinlock needed ? */ 35.830 - if (urb->bandwidth) 35.831 - usb_release_bandwidth(urb->dev, urb, 1); 35.832 - xhci_delete_urb(urb); 35.833 - break; 35.834 - case PIPE_INTERRUPT: 35.835 - /* Interrupts are an exception */ 35.836 - if (urb->interval) 35.837 - goto out_complete; 35.838 - 35.839 - /* Release bandwidth for Interrupt or Isoc. transfers */ 35.840 - /* Spinlock needed ? 
*/ 35.841 - if (urb->bandwidth) 35.842 - usb_release_bandwidth(urb->dev, urb, 0); 35.843 - xhci_delete_urb(urb); 35.844 - break; 35.845 - default: 35.846 - info("xhci_transfer_result: unknown pipe type %d for urb %p\n", 35.847 - usb_pipetype(urb->pipe), urb); 35.848 - } 35.849 - 35.850 - /* Remove it from xhci->urb_list */ 35.851 - list_del_init(&urb->urb_list); 35.852 - 35.853 -out_complete: 35.854 - xhci_add_complete(urb); 35.855 - 35.856 -out: 35.857 - spin_unlock_irqrestore(&urb->lock, flags); 35.858 -} 35.859 - 35.860 -static int xhci_unlink_urb(struct urb *urb) 35.861 -{ 35.862 - unsigned long flags; 35.863 - struct urb_priv *urbp = urb->hcpriv; 35.864 - 35.865 - if (!urb) 35.866 - return -EINVAL; 35.867 - 35.868 - if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv) 35.869 - return -ENODEV; 35.870 - 35.871 - spin_lock_irqsave(&xhci->urb_list_lock, flags); 35.872 - spin_lock(&urb->lock); 35.873 - 35.874 - /* Release bandwidth for Interrupt or Isoc. transfers */ 35.875 - /* Spinlock needed ? */ 35.876 - if (urb->bandwidth) { 35.877 - switch (usb_pipetype(urb->pipe)) { 35.878 - case PIPE_INTERRUPT: 35.879 - usb_release_bandwidth(urb->dev, urb, 0); 35.880 - break; 35.881 - case PIPE_ISOCHRONOUS: 35.882 - usb_release_bandwidth(urb->dev, urb, 1); 35.883 - break; 35.884 - default: 35.885 - break; 35.886 - } 35.887 - } 35.888 - 35.889 - if (urb->status != -EINPROGRESS) { 35.890 - spin_unlock(&urb->lock); 35.891 - spin_unlock_irqrestore(&xhci->urb_list_lock, flags); 35.892 - return 0; 35.893 - } 35.894 - 35.895 - list_del_init(&urb->urb_list); 35.896 - 35.897 - /* Short circuit the virtual root hub */ 35.898 - if (urb->dev == xhci->rh.dev) { 35.899 - rh_unlink_urb(urb); 35.900 - 35.901 - spin_unlock(&urb->lock); 35.902 - spin_unlock_irqrestore(&xhci->urb_list_lock, flags); 35.903 - 35.904 - xhci_call_completion(urb); 35.905 - } else { 35.906 - if (urb->transfer_flags & USB_ASYNC_UNLINK) { 35.907 - /* We currently don't currently attempt to cancel URBs 35.908 - * that have been queued in the ring. We handle async 35.909 - * unlinked URBs when they complete. 
*/ 35.910 - urbp->status = urb->status = -ECONNABORTED; 35.911 - spin_unlock(&urb->lock); 35.912 - spin_unlock_irqrestore(&xhci->urb_list_lock, flags); 35.913 - } else { 35.914 - urb->status = -ENOENT; 35.915 - 35.916 - spin_unlock(&urb->lock); 35.917 - spin_unlock_irqrestore(&xhci->urb_list_lock, flags); 35.918 - 35.919 - if (in_interrupt()) { /* wait at least 1 frame */ 35.920 - static int errorcount = 10; 35.921 - 35.922 - if (errorcount--) 35.923 - dbg("xhci_unlink_urb called from interrupt for urb %p", urb); 35.924 - udelay(1000); 35.925 - } else 35.926 - schedule_timeout(1+1*HZ/1000); 35.927 - 35.928 - xhci_delete_urb(urb); 35.929 - 35.930 - xhci_call_completion(urb); 35.931 - } 35.932 - } 35.933 - 35.934 - return 0; 35.935 -} 35.936 - 35.937 -static void xhci_call_completion(struct urb *urb) 35.938 -{ 35.939 - struct urb_priv *urbp; 35.940 - struct usb_device *dev = urb->dev; 35.941 - int is_ring = 0, killed, resubmit_interrupt, status; 35.942 - struct urb *nurb; 35.943 - unsigned long flags; 35.944 - 35.945 - spin_lock_irqsave(&urb->lock, flags); 35.946 - 35.947 - urbp = (struct urb_priv *)urb->hcpriv; 35.948 - if (!urbp || !urb->dev) { 35.949 - spin_unlock_irqrestore(&urb->lock, flags); 35.950 - return; 35.951 - } 35.952 - 35.953 - killed = (urb->status == -ENOENT || urb->status == -ECONNABORTED || 35.954 - urb->status == -ECONNRESET); 35.955 - resubmit_interrupt = (usb_pipetype(urb->pipe) == PIPE_INTERRUPT && 35.956 - urb->interval); 35.957 - 35.958 - nurb = urb->next; 35.959 - if (nurb && !killed) { 35.960 - int count = 0; 35.961 - 35.962 - while (nurb && nurb != urb && count < MAX_URB_LOOP) { 35.963 - if (nurb->status == -ENOENT || 35.964 - nurb->status == -ECONNABORTED || 35.965 - nurb->status == -ECONNRESET) { 35.966 - killed = 1; 35.967 - break; 35.968 - } 35.969 - 35.970 - nurb = nurb->next; 35.971 - count++; 35.972 - } 35.973 - 35.974 - if (count == MAX_URB_LOOP) 35.975 - err("xhci_call_completion: too many linked URB's, loop? (first loop)"); 35.976 - 35.977 - /* Check to see if chain is a ring */ 35.978 - is_ring = (nurb == urb); 35.979 - } 35.980 - 35.981 - status = urbp->status; 35.982 - if (!resubmit_interrupt || killed) 35.983 - /* We don't need urb_priv anymore */ 35.984 - xhci_destroy_urb_priv(urb); 35.985 - 35.986 - if (!killed) 35.987 - urb->status = status; 35.988 - 35.989 - spin_unlock_irqrestore(&urb->lock, flags); 35.990 - 35.991 - if (urb->complete) 35.992 - urb->complete(urb); 35.993 - 35.994 - if (resubmit_interrupt) 35.995 - /* Recheck the status. The completion handler may have */ 35.996 - /* unlinked the resubmitting interrupt URB */ 35.997 - killed = (urb->status == -ENOENT || 35.998 - urb->status == -ECONNABORTED || 35.999 - urb->status == -ECONNRESET); 35.1000 - 35.1001 - if (resubmit_interrupt && !killed) { 35.1002 - if ( urb->dev != xhci->rh.dev ) 35.1003 - xhci_queue_req(urb); /* XXX What if this fails? */ 35.1004 - /* Don't need to resubmit URBs for the virtual root dev. 
*/ 35.1005 - } else { 35.1006 - if (is_ring && !killed) { 35.1007 - urb->dev = dev; 35.1008 - xhci_submit_urb(urb); 35.1009 - } else { 35.1010 - /* We decrement the usage count after we're done */ 35.1011 - /* with everything */ 35.1012 - usb_dec_dev_use(dev); 35.1013 - } 35.1014 - } 35.1015 -} 35.1016 - 35.1017 -static void xhci_finish_completion(void) 35.1018 -{ 35.1019 - struct list_head *tmp, *head; 35.1020 - unsigned long flags; 35.1021 - 35.1022 - spin_lock_irqsave(&xhci->complete_list_lock, flags); 35.1023 - head = &xhci->complete_list; 35.1024 - tmp = head->next; 35.1025 - while (tmp != head) { 35.1026 - struct urb_priv *urbp = list_entry(tmp, struct urb_priv, 35.1027 - complete_list); 35.1028 - struct urb *urb = urbp->urb; 35.1029 - 35.1030 - list_del_init(&urbp->complete_list); 35.1031 - spin_unlock_irqrestore(&xhci->complete_list_lock, flags); 35.1032 - 35.1033 - xhci_call_completion(urb); 35.1034 - 35.1035 - spin_lock_irqsave(&xhci->complete_list_lock, flags); 35.1036 - head = &xhci->complete_list; 35.1037 - tmp = head->next; 35.1038 - } 35.1039 - spin_unlock_irqrestore(&xhci->complete_list_lock, flags); 35.1040 -} 35.1041 - 35.1042 -static struct usb_operations xhci_device_operations = { 35.1043 - .allocate = xhci_do_nothing_dev, 35.1044 - .deallocate = xhci_do_nothing_dev, 35.1045 - /* It doesn't look like any drivers actually care what the frame number 35.1046 - * is at the moment! If necessary, we could approximate the current 35.1047 - * frame nubmer by passing it from the backend in response messages. */ 35.1048 - .get_frame_number = NULL, 35.1049 - .submit_urb = xhci_submit_urb, 35.1050 - .unlink_urb = xhci_unlink_urb 35.1051 -}; 35.1052 - 35.1053 -/****************************************************************************** 35.1054 - * VIRTUAL ROOT HUB EMULATION 35.1055 - */ 35.1056 - 35.1057 -static __u8 root_hub_dev_des[] = 35.1058 -{ 35.1059 - 0x12, /* __u8 bLength; */ 35.1060 - 0x01, /* __u8 bDescriptorType; Device */ 35.1061 - 0x00, /* __u16 bcdUSB; v1.0 */ 35.1062 - 0x01, 35.1063 - 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */ 35.1064 - 0x00, /* __u8 bDeviceSubClass; */ 35.1065 - 0x00, /* __u8 bDeviceProtocol; */ 35.1066 - 0x08, /* __u8 bMaxPacketSize0; 8 Bytes */ 35.1067 - 0x00, /* __u16 idVendor; */ 35.1068 - 0x00, 35.1069 - 0x00, /* __u16 idProduct; */ 35.1070 - 0x00, 35.1071 - 0x00, /* __u16 bcdDevice; */ 35.1072 - 0x00, 35.1073 - 0x00, /* __u8 iManufacturer; */ 35.1074 - 0x02, /* __u8 iProduct; */ 35.1075 - 0x01, /* __u8 iSerialNumber; */ 35.1076 - 0x01 /* __u8 bNumConfigurations; */ 35.1077 -}; 35.1078 - 35.1079 - 35.1080 -/* Configuration descriptor */ 35.1081 -static __u8 root_hub_config_des[] = 35.1082 -{ 35.1083 - 0x09, /* __u8 bLength; */ 35.1084 - 0x02, /* __u8 bDescriptorType; Configuration */ 35.1085 - 0x19, /* __u16 wTotalLength; */ 35.1086 - 0x00, 35.1087 - 0x01, /* __u8 bNumInterfaces; */ 35.1088 - 0x01, /* __u8 bConfigurationValue; */ 35.1089 - 0x00, /* __u8 iConfiguration; */ 35.1090 - 0x40, /* __u8 bmAttributes; 35.1091 - Bit 7: Bus-powered, 6: Self-powered, 35.1092 - Bit 5 Remote-wakeup, 4..0: resvd */ 35.1093 - 0x00, /* __u8 MaxPower; */ 35.1094 - 35.1095 - /* interface */ 35.1096 - 0x09, /* __u8 if_bLength; */ 35.1097 - 0x04, /* __u8 if_bDescriptorType; Interface */ 35.1098 - 0x00, /* __u8 if_bInterfaceNumber; */ 35.1099 - 0x00, /* __u8 if_bAlternateSetting; */ 35.1100 - 0x01, /* __u8 if_bNumEndpoints; */ 35.1101 - 0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */ 35.1102 - 0x00, /* __u8 if_bInterfaceSubClass; */ 35.1103 - 0x00, /* __u8 
if_bInterfaceProtocol; */ 35.1104 - 0x00, /* __u8 if_iInterface; */ 35.1105 - 35.1106 - /* endpoint */ 35.1107 - 0x07, /* __u8 ep_bLength; */ 35.1108 - 0x05, /* __u8 ep_bDescriptorType; Endpoint */ 35.1109 - 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */ 35.1110 - 0x03, /* __u8 ep_bmAttributes; Interrupt */ 35.1111 - 0x08, /* __u16 ep_wMaxPacketSize; 8 Bytes */ 35.1112 - 0x00, 35.1113 - 0xff /* __u8 ep_bInterval; 255 ms */ 35.1114 -}; 35.1115 - 35.1116 -static __u8 root_hub_hub_des[] = 35.1117 -{ 35.1118 - 0x09, /* __u8 bLength; */ 35.1119 - 0x29, /* __u8 bDescriptorType; Hub-descriptor */ 35.1120 - 0x02, /* __u8 bNbrPorts; */ 35.1121 - 0x00, /* __u16 wHubCharacteristics; */ 35.1122 - 0x00, 35.1123 - 0x01, /* __u8 bPwrOn2pwrGood; 2ms */ 35.1124 - 0x00, /* __u8 bHubContrCurrent; 0 mA */ 35.1125 - 0x00, /* __u8 DeviceRemovable; *** 7 Ports max *** */ 35.1126 - 0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */ 35.1127 -}; 35.1128 - 35.1129 -/* prepare Interrupt pipe transaction data; HUB INTERRUPT ENDPOINT */ 35.1130 -static int rh_send_irq(struct urb *urb) 35.1131 -{ 35.1132 - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; 35.1133 - xhci_port_t *ports = xhci->rh.ports; 35.1134 - unsigned long flags; 35.1135 - int i, len = 1; 35.1136 - __u16 data = 0; 35.1137 - 35.1138 - spin_lock_irqsave(&urb->lock, flags); 35.1139 - for (i = 0; i < xhci->rh.numports; i++) { 35.1140 - /* Set a bit if anything at all has changed on the port, as per 35.1141 - * USB spec 11.12 */ 35.1142 - data |= (ports[i].cs_chg || ports[i].pe_chg ) 35.1143 - ? (1 << (i + 1)) 35.1144 - : 0; 35.1145 - 35.1146 - len = (i + 1) / 8 + 1; 35.1147 - } 35.1148 - 35.1149 - *(__u16 *) urb->transfer_buffer = cpu_to_le16(data); 35.1150 - urb->actual_length = len; 35.1151 - urbp->status = 0; 35.1152 - 35.1153 - spin_unlock_irqrestore(&urb->lock, flags); 35.1154 - 35.1155 - if ((data > 0) && (xhci->rh.send != 0)) { 35.1156 - dbg("root-hub INT complete: data: %x", data); 35.1157 - xhci_call_completion(urb); 35.1158 - } 35.1159 - 35.1160 - return 0; 35.1161 -} 35.1162 - 35.1163 -/* Virtual Root Hub INTs are polled by this timer every "interval" ms */ 35.1164 -static int rh_init_int_timer(struct urb *urb); 35.1165 - 35.1166 -static void rh_int_timer_do(unsigned long ptr) 35.1167 -{ 35.1168 - struct urb *urb = (struct urb *)ptr; 35.1169 - struct list_head list, *tmp, *head; 35.1170 - unsigned long flags; 35.1171 - int i; 35.1172 - 35.1173 - for ( i = 0; i < xhci->rh.numports; i++) 35.1174 - xhci_queue_probe(i); 35.1175 - 35.1176 - if (xhci->rh.send) 35.1177 - rh_send_irq(urb); 35.1178 - 35.1179 - INIT_LIST_HEAD(&list); 35.1180 - 35.1181 - spin_lock_irqsave(&xhci->urb_list_lock, flags); 35.1182 - head = &xhci->urb_list; 35.1183 - tmp = head->next; 35.1184 - while (tmp != head) { 35.1185 - struct urb *u = list_entry(tmp, struct urb, urb_list); 35.1186 - struct urb_priv *up = (struct urb_priv *)u->hcpriv; 35.1187 - 35.1188 - tmp = tmp->next; 35.1189 - 35.1190 - spin_lock(&u->lock); 35.1191 - 35.1192 - /* Check if the URB timed out */ 35.1193 - if (u->timeout && time_after_eq(jiffies, 35.1194 - up->inserttime + u->timeout)) { 35.1195 - list_del(&u->urb_list); 35.1196 - list_add_tail(&u->urb_list, &list); 35.1197 - } 35.1198 - 35.1199 - spin_unlock(&u->lock); 35.1200 - } 35.1201 - spin_unlock_irqrestore(&xhci->urb_list_lock, flags); 35.1202 - 35.1203 - head = &list; 35.1204 - tmp = head->next; 35.1205 - while (tmp != head) { 35.1206 - struct urb *u = list_entry(tmp, struct urb, urb_list); 35.1207 - 35.1208 - tmp = tmp->next; 
35.1209 - 35.1210 - u->transfer_flags |= USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED; 35.1211 - xhci_unlink_urb(u); 35.1212 - } 35.1213 - 35.1214 - rh_init_int_timer(urb); 35.1215 -} 35.1216 - 35.1217 -/* Root Hub INTs are polled by this timer */ 35.1218 -static int rh_init_int_timer(struct urb *urb) 35.1219 -{ 35.1220 - xhci->rh.interval = urb->interval; 35.1221 - init_timer(&xhci->rh.rh_int_timer); 35.1222 - xhci->rh.rh_int_timer.function = rh_int_timer_do; 35.1223 - xhci->rh.rh_int_timer.data = (unsigned long)urb; 35.1224 - xhci->rh.rh_int_timer.expires = jiffies 35.1225 - + (HZ * (urb->interval < 30 ? 30 : urb->interval)) / 1000; 35.1226 - add_timer(&xhci->rh.rh_int_timer); 35.1227 - 35.1228 - return 0; 35.1229 -} 35.1230 - 35.1231 -#define OK(x) len = (x); break 35.1232 - 35.1233 -/* Root Hub Control Pipe */ 35.1234 -static int rh_submit_urb(struct urb *urb) 35.1235 -{ 35.1236 - unsigned int pipe = urb->pipe; 35.1237 - struct usb_ctrlrequest *cmd = 35.1238 - (struct usb_ctrlrequest *)urb->setup_packet; 35.1239 - void *data = urb->transfer_buffer; 35.1240 - int leni = urb->transfer_buffer_length; 35.1241 - int len = 0; 35.1242 - xhci_port_t *status; 35.1243 - int stat = 0; 35.1244 - int i; 35.1245 - int retstatus; 35.1246 - unsigned long flags; 35.1247 - 35.1248 - __u16 cstatus; 35.1249 - __u16 bmRType_bReq; 35.1250 - __u16 wValue; 35.1251 - __u16 wIndex; 35.1252 - __u16 wLength; 35.1253 - 35.1254 - if (usb_pipetype(pipe) == PIPE_INTERRUPT) { 35.1255 - xhci->rh.urb = urb; 35.1256 - xhci->rh.send = 1; 35.1257 - xhci->rh.interval = urb->interval; 35.1258 - rh_init_int_timer(urb); 35.1259 - 35.1260 - return -EINPROGRESS; 35.1261 - } 35.1262 - 35.1263 - bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8; 35.1264 - wValue = le16_to_cpu(cmd->wValue); 35.1265 - wIndex = le16_to_cpu(cmd->wIndex); 35.1266 - wLength = le16_to_cpu(cmd->wLength); 35.1267 - 35.1268 - for (i = 0; i < 8; i++) 35.1269 - xhci->rh.c_p_r[i] = 0; 35.1270 - 35.1271 - status = &xhci->rh.ports[wIndex - 1]; 35.1272 - 35.1273 - spin_lock_irqsave(&xhci->rh.port_state_lock, flags); 35.1274 - 35.1275 - switch (bmRType_bReq) { 35.1276 - /* Request Destination: 35.1277 - without flags: Device, 35.1278 - RH_INTERFACE: interface, 35.1279 - RH_ENDPOINT: endpoint, 35.1280 - RH_CLASS means HUB here, 35.1281 - RH_OTHER | RH_CLASS almost ever means HUB_PORT here 35.1282 - */ 35.1283 - 35.1284 - case RH_GET_STATUS: 35.1285 - *(__u16 *)data = cpu_to_le16(1); 35.1286 - OK(2); 35.1287 - case RH_GET_STATUS | RH_INTERFACE: 35.1288 - *(__u16 *)data = cpu_to_le16(0); 35.1289 - OK(2); 35.1290 - case RH_GET_STATUS | RH_ENDPOINT: 35.1291 - *(__u16 *)data = cpu_to_le16(0); 35.1292 - OK(2); 35.1293 - case RH_GET_STATUS | RH_CLASS: 35.1294 - *(__u32 *)data = cpu_to_le32(0); 35.1295 - OK(4); /* hub power */ 35.1296 - case RH_GET_STATUS | RH_OTHER | RH_CLASS: 35.1297 - cstatus = (status->cs_chg) | 35.1298 - (status->pe_chg << 1) | 35.1299 - (xhci->rh.c_p_r[wIndex - 1] << 4); 35.1300 - retstatus = (status->cs) | 35.1301 - (status->pe << 1) | 35.1302 - (status->susp << 2) | 35.1303 - (1 << 8) | /* power on */ 35.1304 - (status->lsda << 9); 35.1305 - *(__u16 *)data = cpu_to_le16(retstatus); 35.1306 - *(__u16 *)(data + 2) = cpu_to_le16(cstatus); 35.1307 - OK(4); 35.1308 - case RH_CLEAR_FEATURE | RH_ENDPOINT: 35.1309 - switch (wValue) { 35.1310 - case RH_ENDPOINT_STALL: 35.1311 - OK(0); 35.1312 - } 35.1313 - break; 35.1314 - case RH_CLEAR_FEATURE | RH_CLASS: 35.1315 - switch (wValue) { 35.1316 - case RH_C_HUB_OVER_CURRENT: 35.1317 - OK(0); /* hub power over 
current */ 35.1318 - } 35.1319 - break; 35.1320 - case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS: 35.1321 - switch (wValue) { 35.1322 - case RH_PORT_ENABLE: 35.1323 - status->pe = 0; 35.1324 - OK(0); 35.1325 - case RH_PORT_SUSPEND: 35.1326 - status->susp = 0; 35.1327 - OK(0); 35.1328 - case RH_PORT_POWER: 35.1329 - OK(0); /* port power */ 35.1330 - case RH_C_PORT_CONNECTION: 35.1331 - status->cs_chg = 0; 35.1332 - OK(0); 35.1333 - case RH_C_PORT_ENABLE: 35.1334 - status->pe_chg = 0; 35.1335 - OK(0); 35.1336 - case RH_C_PORT_SUSPEND: 35.1337 - /*** WR_RH_PORTSTAT(RH_PS_PSSC); */ 35.1338 - OK(0); 35.1339 - case RH_C_PORT_OVER_CURRENT: 35.1340 - OK(0); /* port power over current */ 35.1341 - case RH_C_PORT_RESET: 35.1342 - xhci->rh.c_p_r[wIndex - 1] = 0; 35.1343 - OK(0); 35.1344 - } 35.1345 - break; 35.1346 - case RH_SET_FEATURE | RH_OTHER | RH_CLASS: 35.1347 - switch (wValue) { 35.1348 - case RH_PORT_SUSPEND: 35.1349 - status->susp = 1; 35.1350 - OK(0); 35.1351 - case RH_PORT_RESET: 35.1352 - { 35.1353 - int ret; 35.1354 - xhci->rh.c_p_r[wIndex - 1] = 1; 35.1355 - status->pr = 0; 35.1356 - status->pe = 1; 35.1357 - ret = xhci_port_reset(wIndex - 1); 35.1358 - /* XXX MAW: should probably cancel queued transfers during reset... *\/ */ 35.1359 - if ( ret == 0 ) { OK(0); } 35.1360 - else { return ret; } 35.1361 - } 35.1362 - break; 35.1363 - case RH_PORT_POWER: 35.1364 - OK(0); /* port power ** */ 35.1365 - case RH_PORT_ENABLE: 35.1366 - status->pe = 1; 35.1367 - OK(0); 35.1368 - } 35.1369 - break; 35.1370 - case RH_SET_ADDRESS: 35.1371 - xhci->rh.devnum = wValue; 35.1372 - OK(0); 35.1373 - case RH_GET_DESCRIPTOR: 35.1374 - switch ((wValue & 0xff00) >> 8) { 35.1375 - case 0x01: /* device descriptor */ 35.1376 - len = min_t(unsigned int, leni, 35.1377 - min_t(unsigned int, 35.1378 - sizeof(root_hub_dev_des), wLength)); 35.1379 - memcpy(data, root_hub_dev_des, len); 35.1380 - OK(len); 35.1381 - case 0x02: /* configuration descriptor */ 35.1382 - len = min_t(unsigned int, leni, 35.1383 - min_t(unsigned int, 35.1384 - sizeof(root_hub_config_des), wLength)); 35.1385 - memcpy (data, root_hub_config_des, len); 35.1386 - OK(len); 35.1387 - case 0x03: /* string descriptors */ 35.1388 - len = usb_root_hub_string (wValue & 0xff, 35.1389 - 0, "XHCI-alt", 35.1390 - data, wLength); 35.1391 - if (len > 0) { 35.1392 - OK(min_t(int, leni, len)); 35.1393 - } else 35.1394 - stat = -EPIPE; 35.1395 - } 35.1396 - break; 35.1397 - case RH_GET_DESCRIPTOR | RH_CLASS: 35.1398 - root_hub_hub_des[2] = xhci->rh.numports; 35.1399 - len = min_t(unsigned int, leni, 35.1400 - min_t(unsigned int, sizeof(root_hub_hub_des), wLength)); 35.1401 - memcpy(data, root_hub_hub_des, len); 35.1402 - OK(len); 35.1403 - case RH_GET_CONFIGURATION: 35.1404 - *(__u8 *)data = 0x01; 35.1405 - OK(1); 35.1406 - case RH_SET_CONFIGURATION: 35.1407 - OK(0); 35.1408 - case RH_GET_INTERFACE | RH_INTERFACE: 35.1409 - *(__u8 *)data = 0x00; 35.1410 - OK(1); 35.1411 - case RH_SET_INTERFACE | RH_INTERFACE: 35.1412 - OK(0); 35.1413 - default: 35.1414 - stat = -EPIPE; 35.1415 - } 35.1416 - 35.1417 - spin_unlock_irqrestore(&xhci->rh.port_state_lock, flags); 35.1418 - 35.1419 - urb->actual_length = len; 35.1420 - 35.1421 - return stat; 35.1422 -} 35.1423 - 35.1424 -/* 35.1425 - * MUST be called with urb->lock acquired 35.1426 - */ 35.1427 -static int rh_unlink_urb(struct urb *urb) 35.1428 -{ 35.1429 - if (xhci->rh.urb == urb) { 35.1430 - urb->status = -ENOENT; 35.1431 - xhci->rh.send = 0; 35.1432 - xhci->rh.urb = NULL; 35.1433 - del_timer(&xhci->rh.rh_int_timer); 
35.1434 - } 35.1435 - return 0; 35.1436 -} 35.1437 - 35.1438 -/****************************************************************************** 35.1439 - * CONTROL PLANE FUNCTIONALITY 35.1440 - */ 35.1441 - 35.1442 -/** 35.1443 - * alloc_xhci - initialise a new virtual root hub for a new USB device channel 35.1444 - */ 35.1445 -static int alloc_xhci(void) 35.1446 -{ 35.1447 - int retval; 35.1448 - struct usb_bus *bus; 35.1449 - 35.1450 - retval = -EBUSY; 35.1451 - 35.1452 - xhci = kmalloc(sizeof(*xhci), GFP_KERNEL); 35.1453 - if (!xhci) { 35.1454 - err("couldn't allocate xhci structure"); 35.1455 - retval = -ENOMEM; 35.1456 - goto err_alloc_xhci; 35.1457 - } 35.1458 - 35.1459 - xhci->state = USBIF_STATE_CLOSED; 35.1460 - 35.1461 - spin_lock_init(&xhci->urb_list_lock); 35.1462 - INIT_LIST_HEAD(&xhci->urb_list); 35.1463 - 35.1464 - spin_lock_init(&xhci->complete_list_lock); 35.1465 - INIT_LIST_HEAD(&xhci->complete_list); 35.1466 - 35.1467 - spin_lock_init(&xhci->frame_list_lock); 35.1468 - 35.1469 - bus = usb_alloc_bus(&xhci_device_operations); 35.1470 - 35.1471 - if (!bus) { 35.1472 - err("unable to allocate bus"); 35.1473 - goto err_alloc_bus; 35.1474 - } 35.1475 - 35.1476 - xhci->bus = bus; 35.1477 - bus->bus_name = "XHCI"; 35.1478 - bus->hcpriv = xhci; 35.1479 - 35.1480 - usb_register_bus(xhci->bus); 35.1481 - 35.1482 - /* Initialize the root hub */ 35.1483 - 35.1484 - xhci->rh.numports = 0; 35.1485 - 35.1486 - xhci->bus->root_hub = xhci->rh.dev = usb_alloc_dev(NULL, xhci->bus); 35.1487 - if (!xhci->rh.dev) { 35.1488 - err("unable to allocate root hub"); 35.1489 - goto err_alloc_root_hub; 35.1490 - } 35.1491 - 35.1492 - xhci->state = 0; 35.1493 - 35.1494 - return 0; 35.1495 - 35.1496 -/* 35.1497 - * error exits: 35.1498 - */ 35.1499 -err_alloc_root_hub: 35.1500 - usb_deregister_bus(xhci->bus); 35.1501 - usb_free_bus(xhci->bus); 35.1502 - xhci->bus = NULL; 35.1503 - 35.1504 -err_alloc_bus: 35.1505 - kfree(xhci); 35.1506 - 35.1507 -err_alloc_xhci: 35.1508 - return retval; 35.1509 -} 35.1510 - 35.1511 -/** 35.1512 - * usbif_status_change - deal with an incoming USB_INTERFACE_STATUS_ message 35.1513 - */ 35.1514 -static void usbif_status_change(usbif_fe_interface_status_changed_t *status) 35.1515 -{ 35.1516 - ctrl_msg_t cmsg; 35.1517 - usbif_fe_interface_connect_t up; 35.1518 - long rc; 35.1519 - usbif_sring_t *sring; 35.1520 - 35.1521 - switch ( status->status ) 35.1522 - { 35.1523 - case USBIF_INTERFACE_STATUS_DESTROYED: 35.1524 - printk(KERN_WARNING "Unexpected usbif-DESTROYED message in state %d\n", 35.1525 - xhci->state); 35.1526 - break; 35.1527 - 35.1528 - case USBIF_INTERFACE_STATUS_DISCONNECTED: 35.1529 - if ( xhci->state != USBIF_STATE_CLOSED ) 35.1530 - { 35.1531 - printk(KERN_WARNING "Unexpected usbif-DISCONNECTED message" 35.1532 - " in state %d\n", xhci->state); 35.1533 - break; 35.1534 - /* Not bothering to do recovery here for now. Keep things 35.1535 - * simple. */ 35.1536 - 35.1537 - spin_lock_irq(&xhci->ring_lock); 35.1538 - 35.1539 - /* Clean up resources. */ 35.1540 - free_page((unsigned long)xhci->usb_ring.sring); 35.1541 - unbind_evtchn_from_irqhandler(xhci->evtchn, xhci); 35.1542 - 35.1543 - /* Plug the ring. */ 35.1544 - xhci->recovery = 1; 35.1545 - wmb(); 35.1546 - 35.1547 - spin_unlock_irq(&xhci->ring_lock); 35.1548 - } 35.1549 - 35.1550 - /* Move from CLOSED to DISCONNECTED state. 
*/ 35.1551 - sring = (usbif_sring_t *)__get_free_page(GFP_KERNEL); 35.1552 - SHARED_RING_INIT(sring); 35.1553 - FRONT_RING_INIT(&xhci->usb_ring, sring, PAGE_SIZE); 35.1554 - xhci->state = USBIF_STATE_DISCONNECTED; 35.1555 - 35.1556 - /* Construct an interface-CONNECT message for the domain controller. */ 35.1557 - cmsg.type = CMSG_USBIF_FE; 35.1558 - cmsg.subtype = CMSG_USBIF_FE_INTERFACE_CONNECT; 35.1559 - cmsg.length = sizeof(usbif_fe_interface_connect_t); 35.1560 - up.shmem_frame = virt_to_mfn(sring); 35.1561 - memcpy(cmsg.msg, &up, sizeof(up)); 35.1562 - 35.1563 - /* Tell the controller to bring up the interface. */ 35.1564 - ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE); 35.1565 - break; 35.1566 - 35.1567 - case USBIF_INTERFACE_STATUS_CONNECTED: 35.1568 - if ( xhci->state == USBIF_STATE_CLOSED ) 35.1569 - { 35.1570 - printk(KERN_WARNING "Unexpected usbif-CONNECTED message" 35.1571 - " in state %d\n", xhci->state); 35.1572 - break; 35.1573 - } 35.1574 - 35.1575 - xhci->evtchn = status->evtchn; 35.1576 - xhci->bandwidth = status->bandwidth; 35.1577 - xhci->rh.numports = status->num_ports; 35.1578 - 35.1579 - xhci->rh.ports = kmalloc (sizeof(xhci_port_t) * xhci->rh.numports, GFP_KERNEL); 35.1580 - 35.1581 - if ( xhci->rh.ports == NULL ) 35.1582 - goto alloc_ports_nomem; 35.1583 - 35.1584 - memset(xhci->rh.ports, 0, sizeof(xhci_port_t) * xhci->rh.numports); 35.1585 - 35.1586 - usb_connect(xhci->rh.dev); 35.1587 - 35.1588 - if (usb_new_device(xhci->rh.dev) != 0) { 35.1589 - err("unable to start root hub"); 35.1590 - } 35.1591 - 35.1592 - /* Allocate the appropriate USB bandwidth here... Need to 35.1593 - * somehow know what the total available is thought to be so we 35.1594 - * can calculate the reservation correctly. */ 35.1595 - usb_claim_bandwidth(xhci->rh.dev, xhci->rh.urb, 35.1596 - 1000 - xhci->bandwidth, 0); 35.1597 - 35.1598 - if ( (rc = bind_evtchn_to_irqhandler(xhci->evtchn, xhci_interrupt, 35.1599 - SA_SAMPLE_RANDOM, "usbif", xhci)) ) 35.1600 - printk(KERN_ALERT"usbfront request_irq failed (%ld)\n",rc); 35.1601 - 35.1602 - DPRINTK(KERN_INFO __FILE__ 35.1603 - ": USB XHCI: SHM at %p (0x%lx), EVTCHN %d\n", 35.1604 - xhci->usb_ring.sring, virt_to_mfn(xhci->usbif), 35.1605 - xhci->evtchn); 35.1606 - 35.1607 - xhci->state = USBIF_STATE_CONNECTED; 35.1608 - 35.1609 - break; 35.1610 - 35.1611 - default: 35.1612 - printk(KERN_WARNING "Status change to unknown value %d\n", 35.1613 - status->status); 35.1614 - break; 35.1615 - } 35.1616 - 35.1617 - return; 35.1618 - 35.1619 - alloc_ports_nomem: 35.1620 - printk(KERN_WARNING "Failed to allocate port memory, XHCI failed to connect.\n"); 35.1621 - return; 35.1622 -} 35.1623 - 35.1624 -/** 35.1625 - * usbif_ctrlif_rx - demux control messages by subtype 35.1626 - */ 35.1627 -static void usbif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id) 35.1628 -{ 35.1629 - switch ( msg->subtype ) 35.1630 - { 35.1631 - case CMSG_USBIF_FE_INTERFACE_STATUS_CHANGED: 35.1632 - usbif_status_change((usbif_fe_interface_status_changed_t *) 35.1633 - &msg->msg[0]); 35.1634 - break; 35.1635 - 35.1636 - /* New interface...? */ 35.1637 - default: 35.1638 - msg->length = 0; 35.1639 - break; 35.1640 - } 35.1641 - 35.1642 - ctrl_if_send_response(msg); 35.1643 -} 35.1644 - 35.1645 -static void send_driver_up(void) 35.1646 -{ 35.1647 - control_msg_t cmsg; 35.1648 - usbif_fe_interface_status_changed_t st; 35.1649 - 35.1650 - /* Send a driver-UP notification to the domain controller. 
*/ 35.1651 - cmsg.type = CMSG_USBIF_FE; 35.1652 - cmsg.subtype = CMSG_USBIF_FE_DRIVER_STATUS_CHANGED; 35.1653 - cmsg.length = sizeof(usbif_fe_driver_status_changed_t); 35.1654 - st.status = USBIF_DRIVER_STATUS_UP; 35.1655 - memcpy(cmsg.msg, &st, sizeof(st)); 35.1656 - ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE); 35.1657 -} 35.1658 - 35.1659 -void usbif_resume(void) 35.1660 -{ 35.1661 - int i; 35.1662 - 35.1663 - /* Fake disconnection on all virtual USB ports (suspending / migrating 35.1664 - * will destroy hard state associated will the USB devices anyhow). */ 35.1665 - /* No need to lock here. */ 35.1666 - for ( i = 0; i < xhci->rh.numports; i++ ) 35.1667 - { 35.1668 - xhci->rh.ports[i].cs = 0; 35.1669 - xhci->rh.ports[i].cs_chg = 1; 35.1670 - xhci->rh.ports[i].pe = 0; 35.1671 - } 35.1672 - 35.1673 - send_driver_up(); 35.1674 -} 35.1675 - 35.1676 -static int __init xhci_hcd_init(void) 35.1677 -{ 35.1678 - int retval = -ENOMEM, i; 35.1679 - 35.1680 - if ( (xen_start_info->flags & SIF_INITDOMAIN) || 35.1681 - (xen_start_info->flags & SIF_USB_BE_DOMAIN) ) 35.1682 - return 0; 35.1683 - 35.1684 - info(DRIVER_DESC " " DRIVER_VERSION); 35.1685 - 35.1686 - if (debug) { 35.1687 - errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL); 35.1688 - if (!errbuf) 35.1689 - goto errbuf_failed; 35.1690 - } 35.1691 - 35.1692 - xhci_up_cachep = kmem_cache_create("xhci_urb_priv", 35.1693 - sizeof(struct urb_priv), 0, 0, NULL, NULL); 35.1694 - if (!xhci_up_cachep) 35.1695 - goto up_failed; 35.1696 - 35.1697 - /* Let the domain controller know we're here. For now we wait until 35.1698 - * connection, as for the block and net drivers. This is only strictly 35.1699 - * necessary if we're going to boot off a USB device. */ 35.1700 - printk(KERN_INFO "Initialising Xen virtual USB hub\n"); 35.1701 - 35.1702 - (void)ctrl_if_register_receiver(CMSG_USBIF_FE, usbif_ctrlif_rx, 35.1703 - CALLBACK_IN_BLOCKING_CONTEXT); 35.1704 - 35.1705 - alloc_xhci(); 35.1706 - 35.1707 - send_driver_up(); 35.1708 - 35.1709 - /* 35.1710 - * We should read 'nr_interfaces' from response message and wait 35.1711 - * for notifications before proceeding. For now we assume that we 35.1712 - * will be notified of exactly one interface. 35.1713 - */ 35.1714 - for ( i=0; (xhci->state != USBIF_STATE_CONNECTED) && (i < 10*HZ); i++ ) 35.1715 - { 35.1716 - set_current_state(TASK_INTERRUPTIBLE); 35.1717 - schedule_timeout(1); 35.1718 - } 35.1719 - 35.1720 - if (xhci->state != USBIF_STATE_CONNECTED) 35.1721 - printk(KERN_WARNING "Timeout connecting USB frontend driver!\n"); 35.1722 - 35.1723 - return 0; 35.1724 - 35.1725 -up_failed: 35.1726 - if (errbuf) 35.1727 - kfree(errbuf); 35.1728 - 35.1729 -errbuf_failed: 35.1730 - return retval; 35.1731 -} 35.1732 - 35.1733 -module_init(xhci_hcd_init); 35.1734 - 35.1735 -MODULE_AUTHOR(DRIVER_AUTHOR); 35.1736 -MODULE_DESCRIPTION(DRIVER_DESC); 35.1737 -MODULE_LICENSE("GPL"); 35.1738 -
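The connect path removed above (usbif_status_change) followed the usual Xen frontend bring-up: allocate one page for the shared ring, initialise the shared and private ring views, then advertise the page's machine frame to the backend in a control message. The sketch below is illustrative only and not part of this changeset; the function name is invented, but every call and macro in it appears verbatim in the removed code.

    /* Illustrative only: set up a fresh frontend ring the way the removed
     * usbif_status_change() did. Returns the machine frame to advertise to
     * the backend, or 0 if the page allocation fails. */
    static unsigned long example_usb_ring_setup(usbif_front_ring_t *front_ring)
    {
            usbif_sring_t *sring;

            sring = (usbif_sring_t *)__get_free_page(GFP_KERNEL);
            if (sring == NULL)
                    return 0;

            SHARED_RING_INIT(sring);                       /* reset shared producer/consumer indexes */
            FRONT_RING_INIT(front_ring, sring, PAGE_SIZE); /* attach the frontend's private view */

            /* This value goes into usbif_fe_interface_connect_t.shmem_frame. */
            return virt_to_mfn(sring);
    }

Only the machine frame number crosses the control channel; the backend maps that frame itself, and all later requests and responses travel through the shared ring.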
36.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbfront/xhci.h Thu Sep 22 16:05:44 2005 +0100 36.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 36.3 @@ -1,182 +0,0 @@ 36.4 -/****************************************************************************** 36.5 - * xhci.h 36.6 - * 36.7 - * Private definitions for the Xen Virtual USB Controller. Based on 36.8 - * drivers/usb/host/uhci.h from Linux. Copyright for the imported content is 36.9 - * retained by the original authors. 36.10 - * 36.11 - * Modifications are: 36.12 - * Copyright (C) 2004 Intel Research Cambridge 36.13 - * Copyright (C) 2004, 2005 Mark Williamson 36.14 - */ 36.15 - 36.16 -#ifndef __LINUX_XHCI_H 36.17 -#define __LINUX_XHCI_H 36.18 - 36.19 -#include <linux/list.h> 36.20 -#include <linux/usb.h> 36.21 -#include <asm-xen/xen-public/io/usbif.h> 36.22 -#include <linux/spinlock.h> 36.23 - 36.24 -/* xhci_port_t - current known state of a virtual hub ports */ 36.25 -typedef struct { 36.26 - unsigned int cs :1; /* Connection status. */ 36.27 - unsigned int cs_chg :1; /* Connection status change. */ 36.28 - unsigned int pe :1; /* Port enable. */ 36.29 - unsigned int pe_chg :1; /* Port enable change. */ 36.30 - unsigned int susp :1; /* Suspended. */ 36.31 - unsigned int lsda :1; /* Low speed device attached. */ 36.32 - unsigned int pr :1; /* Port reset. */ 36.33 -} xhci_port_t; 36.34 - 36.35 -/* struct virt_root_hub - state related to the virtual root hub */ 36.36 -struct virt_root_hub { 36.37 - struct usb_device *dev; 36.38 - int devnum; /* Address of Root Hub endpoint */ 36.39 - struct urb *urb; 36.40 - void *int_addr; 36.41 - int send; 36.42 - int interval; 36.43 - int numports; 36.44 - int c_p_r[8]; 36.45 - struct timer_list rh_int_timer; 36.46 - spinlock_t port_state_lock; 36.47 - xhci_port_t *ports; 36.48 -}; 36.49 - 36.50 -/* struct xhci - contains the state associated with a single USB interface */ 36.51 -struct xhci { 36.52 - 36.53 -#ifdef CONFIG_PROC_FS 36.54 - /* procfs */ 36.55 - int num; 36.56 - struct proc_dir_entry *proc_entry; 36.57 -#endif 36.58 - 36.59 - int evtchn; /* Interdom channel to backend */ 36.60 - enum { 36.61 - USBIF_STATE_CONNECTED = 2, 36.62 - USBIF_STATE_DISCONNECTED = 1, 36.63 - USBIF_STATE_CLOSED = 0 36.64 - } state; /* State of this USB interface */ 36.65 - unsigned long recovery; /* boolean recovery in progress flag */ 36.66 - 36.67 - unsigned long bandwidth; 36.68 - 36.69 - struct usb_bus *bus; 36.70 - 36.71 - /* Main list of URB's currently controlled by this HC */ 36.72 - spinlock_t urb_list_lock; 36.73 - struct list_head urb_list; /* P: xhci->urb_list_lock */ 36.74 - 36.75 - /* List of URB's awaiting completion callback */ 36.76 - spinlock_t complete_list_lock; 36.77 - struct list_head complete_list; /* P: xhci->complete_list_lock */ 36.78 - 36.79 - struct virt_root_hub rh; /* private data of the virtual root hub */ 36.80 - 36.81 - spinlock_t ring_lock; 36.82 - usbif_front_ring_t usb_ring; 36.83 - 36.84 - int awaiting_reset; 36.85 -}; 36.86 - 36.87 -/* per-URB private data structure for the host controller */ 36.88 -struct urb_priv { 36.89 - struct urb *urb; 36.90 - usbif_iso_t *schedule; 36.91 - struct usb_device *dev; 36.92 - 36.93 - int in_progress : 1; /* QH was queued (not linked in) */ 36.94 - int short_control_packet : 1; /* If we get a short packet during */ 36.95 - /* a control transfer, retrigger */ 36.96 - /* the status phase */ 36.97 - 36.98 - int status; /* Final status */ 36.99 - 36.100 - unsigned long inserttime; /* In jiffies */ 36.101 - 36.102 - struct list_head complete_list; /* 
P: xhci->complete_list_lock */ 36.103 -}; 36.104 - 36.105 -/* 36.106 - * Locking in xhci.c 36.107 - * 36.108 - * spinlocks are used extensively to protect the many lists and data 36.109 - * structures we have. It's not that pretty, but it's necessary. We 36.110 - * need to be done with all of the locks (except complete_list_lock) when 36.111 - * we call urb->complete. I've tried to make it simple enough so I don't 36.112 - * have to spend hours racking my brain trying to figure out if the 36.113 - * locking is safe. 36.114 - * 36.115 - * Here's the safe locking order to prevent deadlocks: 36.116 - * 36.117 - * #1 xhci->urb_list_lock 36.118 - * #2 urb->lock 36.119 - * #3 xhci->urb_remove_list_lock 36.120 - * #4 xhci->complete_list_lock 36.121 - * 36.122 - * If you're going to grab 2 or more locks at once, ALWAYS grab the lock 36.123 - * at the lowest level FIRST and NEVER grab locks at the same level at the 36.124 - * same time. 36.125 - * 36.126 - * So, if you need xhci->urb_list_lock, grab it before you grab urb->lock 36.127 - */ 36.128 - 36.129 -/* ------------------------------------------------------------------------- 36.130 - Virtual Root HUB 36.131 - ------------------------------------------------------------------------- */ 36.132 -/* destination of request */ 36.133 -#define RH_DEVICE 0x00 36.134 -#define RH_INTERFACE 0x01 36.135 -#define RH_ENDPOINT 0x02 36.136 -#define RH_OTHER 0x03 36.137 - 36.138 -#define RH_CLASS 0x20 36.139 -#define RH_VENDOR 0x40 36.140 - 36.141 -/* Requests: bRequest << 8 | bmRequestType */ 36.142 -#define RH_GET_STATUS 0x0080 36.143 -#define RH_CLEAR_FEATURE 0x0100 36.144 -#define RH_SET_FEATURE 0x0300 36.145 -#define RH_SET_ADDRESS 0x0500 36.146 -#define RH_GET_DESCRIPTOR 0x0680 36.147 -#define RH_SET_DESCRIPTOR 0x0700 36.148 -#define RH_GET_CONFIGURATION 0x0880 36.149 -#define RH_SET_CONFIGURATION 0x0900 36.150 -#define RH_GET_STATE 0x0280 36.151 -#define RH_GET_INTERFACE 0x0A80 36.152 -#define RH_SET_INTERFACE 0x0B00 36.153 -#define RH_SYNC_FRAME 0x0C80 36.154 -/* Our Vendor Specific Request */ 36.155 -#define RH_SET_EP 0x2000 36.156 - 36.157 -/* Hub port features */ 36.158 -#define RH_PORT_CONNECTION 0x00 36.159 -#define RH_PORT_ENABLE 0x01 36.160 -#define RH_PORT_SUSPEND 0x02 36.161 -#define RH_PORT_OVER_CURRENT 0x03 36.162 -#define RH_PORT_RESET 0x04 36.163 -#define RH_PORT_POWER 0x08 36.164 -#define RH_PORT_LOW_SPEED 0x09 36.165 -#define RH_C_PORT_CONNECTION 0x10 36.166 -#define RH_C_PORT_ENABLE 0x11 36.167 -#define RH_C_PORT_SUSPEND 0x12 36.168 -#define RH_C_PORT_OVER_CURRENT 0x13 36.169 -#define RH_C_PORT_RESET 0x14 36.170 - 36.171 -/* Hub features */ 36.172 -#define RH_C_HUB_LOCAL_POWER 0x00 36.173 -#define RH_C_HUB_OVER_CURRENT 0x01 36.174 -#define RH_DEVICE_REMOTE_WAKEUP 0x00 36.175 -#define RH_ENDPOINT_STALL 0x01 36.176 - 36.177 -/* Our Vendor Specific feature */ 36.178 -#define RH_REMOVE_EP 0x00 36.179 - 36.180 -#define RH_ACK 0x01 36.181 -#define RH_REQ_ERR -1 36.182 -#define RH_NACK 0x00 36.183 - 36.184 -#endif 36.185 -
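The RH_* request codes above pack a control transfer's bmRequestType into the low byte and bRequest into the high byte, which is what lets the removed root-hub control handler dispatch on composite labels such as RH_SET_FEATURE | RH_OTHER | RH_CLASS. A small illustrative decoder, not part of this changeset; the helper name is made up, and struct usb_ctrlrequest is the standard Linux setup-packet layout.

    #include <linux/usb.h>

    /* Build the (bRequest << 8) | bmRequestType key that the RH_* codes encode. */
    static __u16 example_rh_request_key(const struct usb_ctrlrequest *cmd)
    {
            return cmd->bRequestType | (cmd->bRequest << 8);
    }

For example, a hub-class GetPortStatus setup packet (bmRequestType 0xA3, bRequest GET_STATUS = 0) gives 0x00A3, i.e. RH_GET_STATUS | RH_OTHER | RH_CLASS, while ClearPortFeature (0x23, CLEAR_FEATURE = 1) gives 0x0123, i.e. RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS.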
37.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c Thu Sep 22 16:05:44 2005 +0100 37.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c Thu Sep 22 16:12:14 2005 +0100 37.3 @@ -231,3 +231,13 @@ void xb_suspend_comms(void) 37.4 37.5 unbind_evtchn_from_irqhandler(xen_start_info->store_evtchn, &xb_waitq); 37.6 } 37.7 + 37.8 +/* 37.9 + * Local variables: 37.10 + * c-file-style: "linux" 37.11 + * indent-tabs-mode: t 37.12 + * c-indent-level: 8 37.13 + * c-basic-offset: 8 37.14 + * tab-width: 8 37.15 + * End: 37.16 + */
38.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h Thu Sep 22 16:05:44 2005 +0100 38.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h Thu Sep 22 16:12:14 2005 +0100 38.3 @@ -39,3 +39,13 @@ int xs_input_avail(void); 38.4 extern wait_queue_head_t xb_waitq; 38.5 38.6 #endif /* _XENBUS_COMMS_H */ 38.7 + 38.8 +/* 38.9 + * Local variables: 38.10 + * c-file-style: "linux" 38.11 + * indent-tabs-mode: t 38.12 + * c-indent-level: 8 38.13 + * c-basic-offset: 8 38.14 + * tab-width: 8 38.15 + * End: 38.16 + */
39.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c Thu Sep 22 16:05:44 2005 +0100 39.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c Thu Sep 22 16:12:14 2005 +0100 39.3 @@ -186,3 +186,13 @@ xenbus_dev_init(void) 39.4 } 39.5 39.6 __initcall(xenbus_dev_init); 39.7 + 39.8 +/* 39.9 + * Local variables: 39.10 + * c-file-style: "linux" 39.11 + * indent-tabs-mode: t 39.12 + * c-indent-level: 8 39.13 + * c-basic-offset: 8 39.14 + * tab-width: 8 39.15 + * End: 39.16 + */
40.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c Thu Sep 22 16:05:44 2005 +0100 40.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c Thu Sep 22 16:12:14 2005 +0100 40.3 @@ -687,3 +687,13 @@ static int __init xenbus_probe_init(void 40.4 } 40.5 40.6 postcore_initcall(xenbus_probe_init); 40.7 + 40.8 +/* 40.9 + * Local variables: 40.10 + * c-file-style: "linux" 40.11 + * indent-tabs-mode: t 40.12 + * c-indent-level: 8 40.13 + * c-basic-offset: 8 40.14 + * tab-width: 8 40.15 + * End: 40.16 + */
41.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c Thu Sep 22 16:05:44 2005 +0100 41.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c Thu Sep 22 16:12:14 2005 +0100 41.3 @@ -566,3 +566,13 @@ int xs_init(void) 41.4 return PTR_ERR(watcher); 41.5 return 0; 41.6 } 41.7 + 41.8 +/* 41.9 + * Local variables: 41.10 + * c-file-style: "linux" 41.11 + * indent-tabs-mode: t 41.12 + * c-indent-level: 8 41.13 + * c-basic-offset: 8 41.14 + * tab-width: 8 41.15 + * End: 41.16 + */
42.1 --- a/tools/python/xen/xend/XendDomain.py Thu Sep 22 16:05:44 2005 +0100 42.2 +++ b/tools/python/xen/xend/XendDomain.py Thu Sep 22 16:12:14 2005 +0100 42.3 @@ -305,6 +305,13 @@ class XendDomain: 42.4 42.5 @param vmconfig: vm configuration 42.6 """ 42.7 + # We accept our configuration specified as ['config' [...]], which 42.8 + # some tools or configuration files may be using. For save-restore, 42.9 + # we use the value of XendDomainInfo.sxpr() directly, which has no 42.10 + # such item. 42.11 + nested = sxp.child_value(config, 'config') 42.12 + if nested: 42.13 + config = nested 42.14 return XendDomainInfo.restore(self.dbmap.getPath(), config) 42.15 42.16 def domain_restore(self, src, progress=False):
45.1 --- a/xen/include/public/io/netif.h Thu Sep 22 16:05:44 2005 +0100 45.2 +++ b/xen/include/public/io/netif.h Thu Sep 22 16:12:14 2005 +0100 45.3 @@ -10,10 +10,11 @@ 45.4 #define __XEN_PUBLIC_IO_NETIF_H__ 45.5 45.6 typedef struct netif_tx_request { 45.7 - unsigned long addr; /* Machine address of packet. */ 45.8 + grant_ref_t gref; /* Reference to buffer page */ 45.9 + u16 offset:15; /* Offset within buffer page */ 45.10 u16 csum_blank:1; /* Proto csum field blank? */ 45.11 - u16 id:15; /* Echoed in response message. */ 45.12 - u16 size; /* Packet size in bytes. */ 45.13 + u16 id; /* Echoed in response message. */ 45.14 + u16 size; /* Packet size in bytes. */ 45.15 } netif_tx_request_t; 45.16 45.17 typedef struct netif_tx_response { 45.18 @@ -22,21 +23,15 @@ typedef struct netif_tx_response { 45.19 } netif_tx_response_t; 45.20 45.21 typedef struct { 45.22 - u16 id; /* Echoed in response message. */ 45.23 -#ifdef CONFIG_XEN_NETDEV_GRANT 45.24 - grant_ref_t gref; /* 2: Reference to incoming granted frame */ 45.25 -#endif 45.26 + u16 id; /* Echoed in response message. */ 45.27 + grant_ref_t gref; /* Reference to incoming granted frame */ 45.28 } netif_rx_request_t; 45.29 45.30 typedef struct { 45.31 -#ifdef CONFIG_XEN_NETDEV_GRANT 45.32 - u32 addr; /* 0: Offset in page of start of received packet */ 45.33 -#else 45.34 - unsigned long addr; /* Machine address of packet. */ 45.35 -#endif 45.36 - u16 csum_valid:1; /* Protocol checksum is validated? */ 45.37 - u16 id:15; 45.38 - s16 status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ 45.39 + u16 offset; /* Offset in page of start of received packet */ 45.40 + u16 csum_valid; /* Protocol checksum is validated? */ 45.41 + u16 id; 45.42 + s16 status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ 45.43 } netif_rx_response_t; 45.44 45.45 /* 45.46 @@ -53,18 +48,8 @@ typedef u32 NETIF_RING_IDX; 45.47 #define MASK_NETIF_RX_IDX(_i) ((_i)&(NETIF_RX_RING_SIZE-1)) 45.48 #define MASK_NETIF_TX_IDX(_i) ((_i)&(NETIF_TX_RING_SIZE-1)) 45.49 45.50 -#ifdef __x86_64__ 45.51 -/* 45.52 - * This restriction can be lifted when we move netfront/netback to use 45.53 - * grant tables. This will remove memory_t fields from the above structures 45.54 - * and thus relax natural alignment restrictions. 45.55 - */ 45.56 -#define NETIF_TX_RING_SIZE 128 45.57 -#define NETIF_RX_RING_SIZE 128 45.58 -#else 45.59 #define NETIF_TX_RING_SIZE 256 45.60 #define NETIF_RX_RING_SIZE 256 45.61 -#endif 45.62 45.63 /* This structure must fit in a memory page. */ 45.64 typedef struct netif_tx_interface {
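With the netif_tx_request change above, the frontend no longer passes a machine address: it supplies a grant reference to the page holding the packet plus a 15-bit offset into that page, and the id field becomes a full 16 bits (it previously shared a word with csum_blank). A minimal sketch of filling the new request layout, not part of this changeset; the helper name is invented, and the grant reference is assumed to have been obtained from the grant-table API beforehand.

    /* Illustrative only: populate a grant-based TX request. */
    static void example_fill_tx_request(netif_tx_request_t *tx,
                                        grant_ref_t ref, u16 off,
                                        u16 id, u16 len)
    {
            tx->gref       = ref;  /* granted page containing the packet          */
            tx->offset     = off;  /* start of packet within that page (15 bits)  */
            tx->csum_blank = 0;    /* 1 if the protocol checksum is left blank    */
            tx->id         = id;   /* echoed back in netif_tx_response_t.id       */
            tx->size       = len;  /* packet length in bytes                      */
    }

Dropping the machine-address fields is also what allows the x86_64-only ring-size restriction to go: as the removed comment noted, moving to grant tables relaxes the natural-alignment constraint, so both rings now use 256 entries on all architectures.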