ia64/xen-unstable
changeset 209:9d5263d6451f
bitkeeper revision 1.70 (3e53cf6aaY-aEl61as7SnxVeSQYkew)
xen_block.c:
Cleanups to block layer before doing scheduling.
author | kaf24@labyrinth.cl.cam.ac.uk |
---|---|
date | Wed Feb 19 18:39:38 2003 +0000 (2003-02-19) |
parents | 2203f6130483 |
children | 358956197b21 |
files | xen-2.4.16/drivers/block/xen_block.c |
line diff
1.1 --- a/xen-2.4.16/drivers/block/xen_block.c Wed Feb 19 17:56:28 2003 +0000 1.2 +++ b/xen-2.4.16/drivers/block/xen_block.c Wed Feb 19 18:39:38 2003 +0000 1.3 @@ -9,18 +9,16 @@ 1.4 #include <xeno/lib.h> 1.5 #include <xeno/sched.h> 1.6 #include <xeno/blkdev.h> 1.7 -#include <xeno/event.h> /* mark_hyp_event */ 1.8 +#include <xeno/event.h> 1.9 #include <hypervisor-ifs/block.h> 1.10 #include <hypervisor-ifs/hypervisor-if.h> 1.11 #include <asm-i386/io.h> 1.12 #include <asm/spinlock.h> 1.13 - 1.14 #include <xeno/keyhandler.h> 1.15 1.16 #define XEN_BLK_DEBUG 0 1.17 #define XEN_BLK_DEBUG_LEVEL KERN_ALERT 1.18 1.19 - 1.20 /* 1.21 * KAF XXX: the current state of play with blk_requests. 1.22 * 1.23 @@ -30,8 +28,7 @@ 1.24 * will go where we currently increment 'nr_pending'. The scheduler will 1.25 * refuse admission of a blk_request if it is already full. 1.26 */ 1.27 -typedef struct blk_request 1.28 -{ 1.29 +typedef struct blk_request { 1.30 struct list_head queue; 1.31 struct buffer_head *bh; 1.32 blk_ring_req_entry_t *request; 1.33 @@ -40,25 +37,19 @@ typedef struct blk_request 1.34 #define MAX_PENDING_REQS 256 /* very arbitrary */ 1.35 static kmem_cache_t *blk_request_cachep; 1.36 static atomic_t nr_pending; 1.37 -static int pending_work; /* which domains have work for us? */ 1.38 - 1.39 +static int pending_work; /* Bitmask: which domains have work for us? */ 1.40 1.41 -/* some definitions */ 1.42 -void dumpx (char *buffer, int count); 1.43 -void printx (char * string); 1.44 -long do_block_io_op_domain (struct task_struct* task); 1.45 -int dispatch_rw_block_io (int index); 1.46 -int dispatch_probe_block_io (int index); 1.47 -int dispatch_debug_block_io (int index); 1.48 +static long do_block_io_op_domain (struct task_struct* task); 1.49 +static int dispatch_rw_block_io (int index); 1.50 +static int dispatch_probe_block_io (int index); 1.51 +static int dispatch_debug_block_io (int index); 1.52 1.53 /* 1.54 - * end_block_io_op 1.55 - * 1.56 - * IO has completed. 
Need to notify the guest operating system. 1.57 - * Called from ll_rw_block -- currently /DIRECTLY/ -- XXX FIXME 1.58 - * (e.g. hook into proper end processing of ll_rw) 1.59 + * end_block_io_op: 1.60 + * IO has completed. Need to notify the guest operating system. 1.61 + * Called from ll_rw_block -- currently /DIRECTLY/ -- XXX FIXME 1.62 + * (e.g. hook into proper end processing of ll_rw) 1.63 */ 1.64 - 1.65 void end_block_io_op(struct buffer_head * bh) 1.66 { 1.67 unsigned long cpu_mask; 1.68 @@ -101,15 +92,6 @@ void end_block_io_op(struct buffer_head 1.69 /* 1.70 * now check if there is any pending work from any domain 1.71 * that we were previously unable to process. 1.72 - * 1.73 - * NOTE: the current algorithm will check _every_ domain 1.74 - * and wake up _every_ domain that has pending work. 1.75 - * In the future, we should stop waking up domains once 1.76 - * there isn't any space for their requests any more 1.77 - * ALSO, we need to maintain a counter of the last domain 1.78 - * that we woke up for fairness... we shouldn't restart 1.79 - * at domain 0 every time (although we might want to special 1.80 - * case domain 0); 1.81 */ 1.82 for ( loop = 0; loop < XEN_BLOCK_MAX_DOMAINS; loop++ ) 1.83 { 1.84 @@ -139,10 +121,9 @@ void end_block_io_op(struct buffer_head 1.85 1.86 1.87 /* 1.88 - * do_block_io_op 1.89 - * 1.90 - * Accept a block io request from a guest operating system. 1.91 - * There is an entry in the hypervisor_call_table (xen/arch/i386/entry.S). 1.92 + * do_block_io_op: 1.93 + * Accept a block io request from a guest operating system. 1.94 + * There is an entry in the hypervisor_call_table (xen/arch/i386/entry.S). 
1.95 */ 1.96 1.97 long do_block_io_op (void) 1.98 @@ -152,11 +133,10 @@ long do_block_io_op (void) 1.99 1.100 1.101 /* 1.102 - * do_block_io_op 1.103 - * 1.104 - * handle the requests for a particular domain 1.105 + * do_block_io_op_domain: 1.106 + * Handle the requests for a particular domain 1.107 */ 1.108 -long do_block_io_op_domain (struct task_struct* task) 1.109 +static long do_block_io_op_domain (struct task_struct* task) 1.110 { 1.111 blk_ring_t *blk_ring = task->blk_ring_base; 1.112 int loop, status; 1.113 @@ -209,16 +189,16 @@ long do_block_io_op_domain (struct task_ 1.114 } 1.115 1.116 1.117 -int dispatch_debug_block_io (int index) 1.118 +static int dispatch_debug_block_io (int index) 1.119 { 1.120 printk (KERN_ALERT "dispatch_debug_block_io: UNIMPL\n"); 1.121 return 1; 1.122 } 1.123 1.124 -extern void ide_probe_devices(xen_disk_info_t *xdi); 1.125 1.126 -int dispatch_probe_block_io (int index) 1.127 +static int dispatch_probe_block_io (int index) 1.128 { 1.129 + extern void ide_probe_devices(xen_disk_info_t *xdi); 1.130 blk_ring_t *blk_ring = current->blk_ring_base; 1.131 xen_disk_info_t *xdi; 1.132 1.133 @@ -233,10 +213,10 @@ int dispatch_probe_block_io (int index) 1.134 return 0; 1.135 } 1.136 1.137 -extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 1.138 1.139 -int dispatch_rw_block_io (int index) 1.140 +static int dispatch_rw_block_io (int index) 1.141 { 1.142 + extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 1.143 blk_ring_t *blk_ring = current->blk_ring_base; 1.144 struct buffer_head *bh; 1.145 struct request_queue *rq; 1.146 @@ -317,41 +297,6 @@ int dispatch_rw_block_io (int index) 1.147 } 1.148 1.149 1.150 -/* 1.151 - * debug dump_queue 1.152 - * arguments: queue head, name of queue 1.153 - */ 1.154 -void dump_queue(struct list_head *queue, char *name) 1.155 -{ 1.156 - struct list_head *list; 1.157 - int loop = 0; 1.158 - 1.159 - printk ("QUEUE %s %lx n: %lx, p: %lx\n", name, (unsigned long)queue, 1.160 - 
(unsigned long) queue->next, (unsigned long) queue->prev); 1.161 - list_for_each (list, queue) { 1.162 - printk (" %s %d : %lx n: %lx, p: %lx\n", name, loop++, 1.163 - (unsigned long)list, 1.164 - (unsigned long)list->next, (unsigned long)list->prev); 1.165 - } 1.166 - return; 1.167 -} 1.168 - 1.169 -void dump_queue_head(struct list_head *queue, char *name) 1.170 -{ 1.171 - struct list_head *list; 1.172 - int loop = 0; 1.173 - 1.174 - printk ("QUEUE %s %lx n: %lx, p: %lx\n", name, (unsigned long)queue, 1.175 - (unsigned long) queue->next, (unsigned long) queue->prev); 1.176 - list_for_each (list, queue) { 1.177 - printk (" %d : %lx n: %lx, p: %lx\n", loop++, 1.178 - (unsigned long)list, 1.179 - (unsigned long)list->next, (unsigned long)list->prev); 1.180 - if (loop >= 5) return; 1.181 - } 1.182 -} 1.183 - 1.184 - 1.185 static void dump_blockq(u_char key, void *dev_id, struct pt_regs *regs) 1.186 { 1.187 printk("Dumping block queue stats: nr_pending = %d\n", 1.188 @@ -359,12 +304,6 @@ static void dump_blockq(u_char key, void 1.189 } 1.190 1.191 1.192 -/* 1.193 - * initialize_block_io 1.194 - * 1.195 - * initialize everything for block io called from 1.196 - * arch/i386/setup.c::start_of_day 1.197 - */ 1.198 void initialize_block_io () 1.199 { 1.200 blk_request_cachep = kmem_cache_create( 1.201 @@ -373,9 +312,7 @@ void initialize_block_io () 1.202 1.203 add_key_handler('b', dump_blockq, "dump xen ide blkdev stats"); 1.204 1.205 - /* If bit i is true then domain i has work for us to do. */ 1.206 pending_work = 0; 1.207 - 1.208 atomic_set(&nr_pending, 0); 1.209 } 1.210