ia64/xen-unstable

changeset 206:75005643116d

bitkeeper revision 1.67 (3e538f77DpPDrYg-B4aKKUEVyvbOIw)

xen_block.c:
Gutted some verbosity from xen_block.c. Ready to accept a proper scheduler (or a placeholder one, at least) now, I think.
author kaf24@labyrinth.cl.cam.ac.uk
date Wed Feb 19 14:06:47 2003 +0000 (2003-02-19)
parents 6ef50c70b315
children 39dc127a1ffe
files xen-2.4.16/drivers/block/xen_block.c
line diff
     1.1 --- a/xen-2.4.16/drivers/block/xen_block.c	Wed Feb 19 13:40:12 2003 +0000
     1.2 +++ b/xen-2.4.16/drivers/block/xen_block.c	Wed Feb 19 14:06:47 2003 +0000
     1.3 @@ -20,8 +20,16 @@
     1.4  #define XEN_BLK_DEBUG 0
     1.5  #define XEN_BLK_DEBUG_LEVEL KERN_ALERT
     1.6  
     1.7 -#define XEN_BLK_REQUEST_LIST_SIZE 256                      /* very arbitrary */
     1.8  
     1.9 +/*
    1.10 + * KAF XXX: the current state of play with blk_requests.
    1.11 + * 
    1.12 + * The following infrastructure is really here for future use.
    1.13 + * blk_requests are currently not used by any mechanism, but eventually
    1.14 + * pending blk_requests will go into an IO scheduler. This entry point
    1.15 + * will go where we currently increment 'nr_pending'. The scheduler will
    1.16 + * refuse admission of a blk_request if it is already full.
    1.17 + */
    1.18  typedef struct blk_request
    1.19  {
    1.20    struct list_head queue;
    1.21 @@ -29,15 +37,11 @@ typedef struct blk_request
    1.22    blk_ring_entry_t request;
    1.23    struct task_struct *domain;                           /* requesting domain */
    1.24  } blk_request_t;
    1.25 -
    1.26 +#define MAX_PENDING_REQS 256                 /* very arbitrary */
    1.27 +static kmem_cache_t *blk_request_cachep;
    1.28 +static atomic_t nr_pending, nr_done;
    1.29  static int pending_work;              /* which domains have work for us? */
    1.30  
    1.31 -blk_request_t blk_request_list[XEN_BLK_REQUEST_LIST_SIZE];
    1.32 -
    1.33 -struct list_head free_queue;          /* unused requests */
    1.34 -struct list_head pending_queue;       /* waiting for hardware */
    1.35 -spinlock_t free_queue_lock;
    1.36 -spinlock_t pending_queue_lock;
    1.37  
    1.38  /* some definitions */
    1.39  void dumpx (char *buffer, int count);
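
The KAF XXX comment above spells out the plan for these new declarations: nothing consumes blk_requests yet, but the point where 'nr_pending' is incremented is where a future IO scheduler would admit or refuse work. A minimal sketch of what that entry point might look like, built only from names declared in this changeset (sched_admit_blk_request itself is hypothetical, not a function in the source):

    /* Hypothetical scheduler admission hook; does not exist in the
     * source yet. Refuses a blk_request when the scheduler is already
     * full, as the comment above describes. */
    static int sched_admit_blk_request(blk_request_t *blk_request)
    {
        if ( atomic_read(&nr_pending) >= MAX_PENDING_REQS )
            return 0;                     /* full: refuse admission */
        atomic_inc(&nr_pending);
        /* Eventually: enqueue 'blk_request' into the IO scheduler. */
        return 1;
    }
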
    1.40 @@ -67,15 +71,14 @@ void end_block_io_op(struct buffer_head 
    1.41  	printk(XEN_BLK_DEBUG_LEVEL "XEN end_block_io_op,  bh: %lx\n",
    1.42  	       (unsigned long)bh);
    1.43      
    1.44 -    spin_lock_irqsave(&pending_queue_lock, flags);
    1.45      if ( (blk_request = (blk_request_t *)bh->b_xen_request) == NULL) 
    1.46          goto bad_interrupt;
    1.47 -    list_del(&blk_request->queue);
    1.48 -    spin_unlock(&pending_queue_lock);
    1.49 +    atomic_dec(&nr_pending);
    1.50      
    1.51      p = blk_request->domain;
    1.52  
    1.53 -    spin_lock(&p->io_done_queue_lock);
    1.54 +    atomic_inc(&nr_done);
    1.55 +    spin_lock_irqsave(&p->io_done_queue_lock, flags);
    1.56      list_add_tail(&blk_request->queue, &p->io_done_queue);
    1.57      /* enqueue work for 'flush_blk_queue' handler */
    1.58      cpu_mask = mark_hyp_event(p, _HYP_EVENT_BLK_RX);
    1.59 @@ -89,7 +92,6 @@ void end_block_io_op(struct buffer_head 
    1.60      printk (KERN_ALERT
    1.61              "   block io interrupt received for unknown buffer [0x%lx]\n",
    1.62              (unsigned long) bh);
    1.63 -    spin_unlock_irqrestore(&pending_queue_lock, flags);
    1.64      BUG();
    1.65      return;
    1.66  }
    1.67 @@ -118,7 +120,8 @@ void flush_blk_queue(void)
    1.68  	blk_request = list_entry(p->io_done_queue.next, blk_request_t, queue);
    1.69  	list_del(&blk_request->queue);
    1.70  	spin_unlock_irqrestore(&p->io_done_queue_lock, flags);
    1.71 -	
    1.72 +	atomic_dec(&nr_done);
    1.73 +
    1.74  	/* place on ring for guest os */ 
    1.75  	blk_ring = p->blk_ring_base;
    1.76  	position = blk_ring->brx_prod;
    1.77 @@ -137,9 +140,7 @@ void flush_blk_queue(void)
    1.78  	if ( blk_request->bh )
    1.79  	    kfree(blk_request->bh); 
    1.80  
    1.81 -	spin_lock_irqsave(&free_queue_lock, flags);
    1.82 -	list_add_tail(&blk_request->queue, &free_queue);
    1.83 -	spin_unlock_irqrestore(&free_queue_lock, flags);
    1.84 +        kmem_cache_free(blk_request_cachep, blk_request);
    1.85  
    1.86  	spin_lock_irqsave(&p->io_done_queue_lock, flags);
    1.87      }
    1.88 @@ -285,7 +286,6 @@ int dispatch_rw_block_io (int index)
    1.89      struct request_queue *rq;
    1.90      int operation;
    1.91      blk_request_t *blk_request;
    1.92 -    unsigned long flags;
    1.93      
    1.94      /*
    1.95       * check to make sure that the block request seems at least
    1.96 @@ -312,22 +312,12 @@ int dispatch_rw_block_io (int index)
    1.97  		"sync" : "async"));
    1.98      }
    1.99  
   1.100 -    /* find an empty request slot */
   1.101 -    spin_lock_irqsave(&free_queue_lock, flags);
   1.102 -    if (list_empty(&free_queue)) {
   1.103 -	spin_unlock_irqrestore(&free_queue_lock, flags);
   1.104 -	return 1;
   1.105 -    }
   1.106 +    /* XXX KAF: A bit racey maybe? The whole wake-up pending needs fixing. */
   1.107 +    if ( atomic_read(&nr_pending) >= MAX_PENDING_REQS )
   1.108 +        return 1;
   1.109 +    atomic_inc(&nr_pending);
   1.110 +    blk_request = kmem_cache_alloc(blk_request_cachep, GFP_ATOMIC);
   1.111  
   1.112 -    blk_request = list_entry(free_queue.next, blk_request_t, queue);
   1.113 -    list_del(&blk_request->queue);
   1.114 -    spin_unlock_irqrestore(&free_queue_lock, flags);
   1.115 -
   1.116 -    /* place request on pending list */
   1.117 -    spin_lock_irqsave(&pending_queue_lock, flags);
   1.118 -    list_add_tail(&blk_request->queue, &pending_queue);
   1.119 -    spin_unlock_irqrestore(&pending_queue_lock, flags);
   1.120 -    
   1.121      /* we'll be doing this frequently, would a cache be appropriate? */
   1.122      /* free in flush_blk_queue */
   1.123      bh = (struct buffer_head *) kmalloc(sizeof(struct buffer_head), 
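
The XXX comment in the hunk above flags the admission test as racy: two CPUs can both pass the atomic_read() check before either increments nr_pending, briefly admitting more than MAX_PENDING_REQS. A minimal sketch of one conventional fix using only the atomic ops already in this file: increment first, then back out on overshoot, which keeps the admitted count bounded (at the cost of occasional spurious rejection under contention). The NULL check after kmem_cache_alloc() is an addition here; the hunk itself does not test for allocation failure.

    /* Sketch only: inc-then-test admission, so the admitted count can
     * never exceed MAX_PENDING_REQS even with concurrent callers. */
    atomic_inc(&nr_pending);
    if ( atomic_read(&nr_pending) > MAX_PENDING_REQS )
    {
        atomic_dec(&nr_pending);
        return 1;
    }
    blk_request = kmem_cache_alloc(blk_request_cachep, GFP_ATOMIC);
    if ( blk_request == NULL )
    {
        /* Assumed handling: treat allocation failure like a full queue. */
        atomic_dec(&nr_pending);
        return 1;
    }
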
   1.124 @@ -407,64 +397,31 @@ void dump_queue_head(struct list_head *q
   1.125      }
   1.126  }
   1.127  
   1.128 +
   1.129  static void dump_blockq(u_char key, void *dev_id, struct pt_regs *regs) 
   1.130  {
   1.131 -    u_long flags; 
   1.132 -
   1.133 -    printk("Dumping block queues:\n"); 
   1.134 -
   1.135 -    spin_lock_irqsave(&free_queue_lock, flags);
   1.136 -    dump_queue(&free_queue, "FREE QUEUE"); 
   1.137 -    spin_unlock_irqrestore(&free_queue_lock, flags);
   1.138 -
   1.139 -    spin_lock_irqsave(&pending_queue_lock, flags);
   1.140 -    dump_queue(&pending_queue, "PENDING QUEUE"); 
   1.141 -    spin_unlock_irqrestore(&pending_queue_lock, flags);
   1.142 -
   1.143 -#if 0
   1.144 -    spin_lock_irqsave(&io_done_queue_lock, flags);
   1.145 -    dump_queue(&io_done_queue, "IO DONE QUEUE"); 
   1.146 -    spin_unlock_irqrestore(&io_done_queue_lock, flags);
   1.147 -#else
   1.148 -    printk("FIXME: IO_DONE_QUEUE IS NOW PER DOMAIN!!\n");
   1.149 -#endif
   1.150 -
   1.151 -    return; 
   1.152 +    printk("Dumping block queue stats:\n"); 
   1.153 +    printk("nr_pending = %d, nr_done = %d\n",
   1.154 +           atomic_read(&nr_pending), atomic_read(&nr_done));
   1.155  }
   1.156  
   1.157  
   1.158 -
   1.159  /*
   1.160   * initialize_block_io
   1.161   *
   1.162 - * initialize everything for block io 
   1.163 - * called from arch/i386/setup.c::start_of_day
   1.164 + * initialize everything for block io called from 
   1.165 + * arch/i386/setup.c::start_of_day
   1.166   */
   1.167 -
   1.168 -void initialize_block_io (){
   1.169 -
   1.170 -    int loop;
   1.171 -    
   1.172 -    INIT_LIST_HEAD(&free_queue);
   1.173 -    INIT_LIST_HEAD(&pending_queue);
   1.174 -    
   1.175 -    spin_lock_init(&free_queue_lock);
   1.176 -    spin_lock_init(&pending_queue_lock);
   1.177 +void initialize_block_io ()
   1.178 +{
   1.179 +    blk_request_cachep = kmem_cache_create(
   1.180 +        "blk_request_cache", sizeof(blk_request_t),
   1.181 +        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
   1.182      
   1.183 -    for (loop = 0; loop < XEN_BLK_REQUEST_LIST_SIZE; loop++)
   1.184 -    {
   1.185 -	list_add_tail(&blk_request_list[loop].queue, &free_queue);
   1.186 -    }
   1.187 -    
   1.188 +    add_key_handler('b', dump_blockq, "dump xen ide blkdev stats"); 
   1.189      
   1.190 -    add_key_handler('b', dump_blockq, "dump xen ide block queues"); 
   1.191 -    
   1.192 -    /*
   1.193 -     * if bit i is true then domain i has work for us to do.
   1.194 -     */
   1.195 +    /* If bit i is true then domain i has work for us to do. */
   1.196      pending_work = 0;
   1.197 -    
   1.198 -    return;
   1.199  }
   1.200  
   1.201
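
Taken together, these hunks replace the static blk_request_list[] and its lock-protected free/pending queues with a slab cache plus two atomic counters. A condensed sketch of the new object lifecycle, assuming the Linux 2.4 slab interface this tree uses (the alloc_blk_request/free_blk_request wrappers are illustrative, not functions in the source):

    /* Cache created once at start-of-day in initialize_block_io(). */
    static kmem_cache_t *blk_request_cachep;

    void initialize_block_io(void)
    {
        /* 2.4 signature: name, object size, offset, flags, ctor, dtor. */
        blk_request_cachep = kmem_cache_create(
            "blk_request_cache", sizeof(blk_request_t),
            0, SLAB_HWCACHE_ALIGN, NULL, NULL);
    }

    /* dispatch_rw_block_io() allocates; GFP_ATOMIC because the caller
     * must not sleep. */
    static blk_request_t *alloc_blk_request(void)
    {
        return kmem_cache_alloc(blk_request_cachep, GFP_ATOMIC);
    }

    /* flush_blk_queue() frees once the response is on the guest ring. */
    static void free_blk_request(blk_request_t *blk_request)
    {
        kmem_cache_free(blk_request_cachep, blk_request);
    }
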