changeset 1912:408ce4535108

bitkeeper revision 1.1108.1.24 (41051ee21NzgdgVzWz4PTL4xkkgjBQ)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xeno
author kaf24@scramble.cl.cam.ac.uk
date Mon Jul 26 15:10:26 2004 +0000 (2004-07-26)
parents af423aeca402 cb113908a384
children e59c333c2ba0
files .rootkeys linux-2.4.26-xen-sparse/arch/xen/mm/hypervisor.c linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h linux-2.4.26-xen-sparse/include/asm-xen/pci.h linux-2.4.26-xen-sparse/include/linux/blkdev.h
line diff
     1.1 --- a/.rootkeys	Mon Jul 26 14:57:12 2004 +0000
     1.2 +++ b/.rootkeys	Mon Jul 26 15:10:26 2004 +0000
     1.3 @@ -136,6 +136,7 @@ 3f1056a9L_kqHcFheV00KbKBzv9j5w linux-2.4
     1.4  3f689063nhrIRsMMZjZxMFk7iEINqQ linux-2.4.26-xen-sparse/include/asm-xen/xen_proc.h
     1.5  40659defgWA92arexpMGn8X3QMDj3w linux-2.4.26-xen-sparse/include/asm-xen/xor.h
     1.6  3f056927gMHl7mWB89rb73JahbhQIA linux-2.4.26-xen-sparse/include/linux/blk.h
     1.7 +41051ec1m6bJVjZocTG0C0V0O6RsVg linux-2.4.26-xen-sparse/include/linux/blkdev.h
     1.8  401c0590D_kwJDU59X8NyvqSv_Cl2A linux-2.4.26-xen-sparse/include/linux/sched.h
     1.9  40a248afgI0_JKthdYAe8beVfXSTpQ linux-2.4.26-xen-sparse/include/linux/skbuff.h
    1.10  3e5a4e686V0nioX2ZpFf056sgvdiQw linux-2.4.26-xen-sparse/include/linux/sunrpc/debug.h
     2.1 --- a/linux-2.4.26-xen-sparse/arch/xen/mm/hypervisor.c	Mon Jul 26 14:57:12 2004 +0000
     2.2 +++ b/linux-2.4.26-xen-sparse/arch/xen/mm/hypervisor.c	Mon Jul 26 15:10:26 2004 +0000
     2.3 @@ -120,7 +120,10 @@ static inline void __flush_page_update_q
     2.4      idx = 0;
     2.5      wmb(); /* Make sure index is cleared first to avoid double updates. */
     2.6      if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
     2.7 -        panic("Failed to execute MMU updates");
     2.8 +    {
     2.9 +        printk(KERN_ALERT "Failed to execute MMU updates.\n");
    2.10 +        BUG();
    2.11 +    }
    2.12  }
    2.13  
    2.14  void _flush_page_update_queue(void)
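
Both code hunks in this changeset make the same conversion: a bare panic(fmt, ...) becomes printk(KERN_ALERT ...) followed by BUG(), so the message is pushed to the console before the kernel traps, and BUG() then produces an oops with the register and stack dump that panic() would not print. A minimal sketch of the pattern, using a hypothetical do_hypercall() stand-in for the real HYPERVISOR_mmu_update() call:

	/* Sketch of the panic() -> printk()+BUG() conversion adopted above.
	 * do_hypercall() is a hypothetical stand-in, not a call from this tree. */
	if ( unlikely(do_hypercall() < 0) )
	{
		printk(KERN_ALERT "hypercall failed\n"); /* reaches the console first */
		BUG();                                   /* oops: EIP and stack dump */
	}
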
     3.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h	Mon Jul 26 14:57:12 2004 +0000
     3.2 +++ b/linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h	Mon Jul 26 15:10:26 2004 +0000
     3.3 @@ -383,9 +383,12 @@ static inline int HYPERVISOR_update_va_m
     3.4          "b" (page_nr), "c" ((new_val).pte_low), "d" (flags) : "memory" );
     3.5  
     3.6      if ( unlikely(ret < 0) )
     3.7 -        panic("Failed update VA mapping: %08lx, %08lx, %08lx",
     3.8 -              page_nr, (new_val).pte_low, flags);
     3.9 -    
    3.10 +    {
    3.11 +        printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
    3.12 +               page_nr, (new_val).pte_low, flags);
    3.13 +        BUG();
    3.14 +    }
    3.15 +
    3.16      return ret;
    3.17  }
    3.18  
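
The same conversion is applied to HYPERVISOR_update_va_mapping(), so a failed update now logs the page number, PTE value, and flags before trapping instead of folding them into a panic() message. For context, a hedged sketch of a call site: mk_pte() and PAGE_KERNEL are standard 2.4 kernel interfaces, while UVMF_INVLPG is assumed from the Xen 2.x public interface headers rather than taken from this changeset:

	/* Hedged sketch: point the PTE backing va at page and flush that TLB
	 * entry. page is a struct page *; UVMF_INVLPG is an assumption from
	 * the Xen 2.x interface, not part of this changeset. */
	pte_t pte = mk_pte(page, PAGE_KERNEL);
	HYPERVISOR_update_va_mapping(va >> PAGE_SHIFT, pte, UVMF_INVLPG);
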
     4.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/pci.h	Mon Jul 26 14:57:12 2004 +0000
     4.2 +++ b/linux-2.4.26-xen-sparse/include/asm-xen/pci.h	Mon Jul 26 15:10:26 2004 +0000
     4.3 @@ -145,8 +145,7 @@ static inline void pci_unmap_page(struct
     4.4  static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
     4.5  			     int nents, int direction)
     4.6  {
     4.7 -	int i, j, nr_pfns;
     4.8 -	unsigned long first_pfn;
     4.9 +	int i;
    4.10  
    4.11  	if (direction == PCI_DMA_NONE)
    4.12  		out_of_line_bug();
    4.13 @@ -160,28 +159,10 @@ static inline int pci_map_sg(struct pci_
    4.14   		else if (!sg[i].address && !sg[i].page)
    4.15   			out_of_line_bug();
    4.16   
    4.17 - 		if (sg[i].address) {
    4.18 + 		if (sg[i].address)
    4.19   			sg[i].dma_address = virt_to_bus(sg[i].address);
    4.20 - 			first_pfn = virt_to_phys(sg[i].address) >> PAGE_SHIFT;
    4.21 - 			nr_pfns = (((unsigned long)sg[i].address & 
    4.22 - 			    (PAGE_SIZE-1)) + sg[i].length + PAGE_SIZE - 1) >>
    4.23 - 			    PAGE_SHIFT;
    4.24 - 		} else {
    4.25 + 		else
    4.26   			sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
    4.27 - 			first_pfn = page_to_phys(sg[i].page) >> PAGE_SHIFT;
    4.28 - 			nr_pfns = (sg[i].offset + sg[i].length + PAGE_SIZE - 
    4.29 - 			    1) >> PAGE_SHIFT;
    4.30 - 		}
    4.31 -
    4.32 -                /*
    4.33 -                 * Check that we merged physical buffers are also contiguous
    4.34 -                 * in machine-address space. We try to fail by returning 0.
    4.35 -                 */
    4.36 -                for (j = 1; j < nr_pfns; j++) {
    4.37 -                    if ( unlikely(pfn_to_mfn(first_pfn+j) != 
    4.38 -                                  (pfn_to_mfn(first_pfn)+j)) )
    4.39 -                        return 0;
    4.40 -                }
    4.41   	}
    4.42   
    4.43  	flush_write_buffers();
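
The pci.h hunk drops the machine-contiguity check from pci_map_sg(). Under Xen, frames that are contiguous in the guest's pseudo-physical space need not be contiguous in machine memory, which is what the device actually DMAs to; the removed loop detected that case and failed the whole mapping by returning 0. Restated as a hypothetical standalone helper, the dropped check was:

	/* The check removed above, restated as a hypothetical helper: a run
	 * of pseudo-physical frames first_pfn .. first_pfn+nr_pfns-1 is only
	 * safe to DMA as one buffer if the backing machine frames are
	 * contiguous as well. */
	static int pfns_machine_contiguous(unsigned long first_pfn, int nr_pfns)
	{
		int j;
		for (j = 1; j < nr_pfns; j++)
			if (pfn_to_mfn(first_pfn + j) != pfn_to_mfn(first_pfn) + j)
				return 0;	/* discontiguous machine frames */
		return 1;
	}

Dropping the check here is consistent with the CONFIG_XEN bh_phys() definition in the new blkdev.h below, which makes the block layer's merge tests compare machine addresses in the first place.
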
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/linux-2.4.26-xen-sparse/include/linux/blkdev.h	Mon Jul 26 15:10:26 2004 +0000
     5.3 @@ -0,0 +1,372 @@
     5.4 +#ifndef _LINUX_BLKDEV_H
     5.5 +#define _LINUX_BLKDEV_H
     5.6 +
     5.7 +#include <linux/major.h>
     5.8 +#include <linux/sched.h>
     5.9 +#include <linux/genhd.h>
    5.10 +#include <linux/tqueue.h>
    5.11 +#include <linux/list.h>
    5.12 +#include <linux/mm.h>
    5.13 +
    5.14 +#include <asm/io.h>
    5.15 +
    5.16 +struct request_queue;
    5.17 +typedef struct request_queue request_queue_t;
    5.18 +struct elevator_s;
    5.19 +typedef struct elevator_s elevator_t;
    5.20 +
    5.21 +/*
    5.22 + * Ok, this is an expanded form so that we can use the same
    5.23 + * request for paging requests.
    5.24 + */
    5.25 +struct request {
    5.26 +	struct list_head queue;
    5.27 +	int elevator_sequence;
    5.28 +
    5.29 +	volatile int rq_status;	/* should split this into a few status bits */
    5.30 +#define RQ_INACTIVE		(-1)
    5.31 +#define RQ_ACTIVE		1
    5.32 +#define RQ_SCSI_BUSY		0xffff
    5.33 +#define RQ_SCSI_DONE		0xfffe
    5.34 +#define RQ_SCSI_DISCONNECTING	0xffe0
    5.35 +
    5.36 +	kdev_t rq_dev;
    5.37 +	int cmd;		/* READ or WRITE */
    5.38 +	int errors;
    5.39 +	unsigned long start_time;
    5.40 +	unsigned long sector;
    5.41 +	unsigned long nr_sectors;
    5.42 +	unsigned long hard_sector, hard_nr_sectors;
    5.43 +	unsigned int nr_segments;
    5.44 +	unsigned int nr_hw_segments;
    5.45 +	unsigned long current_nr_sectors, hard_cur_sectors;
    5.46 +	void * special;
    5.47 +	char * buffer;
    5.48 +	struct completion * waiting;
    5.49 +	struct buffer_head * bh;
    5.50 +	struct buffer_head * bhtail;
    5.51 +	request_queue_t *q;
    5.52 +};
    5.53 +
    5.54 +#include <linux/elevator.h>
    5.55 +
    5.56 +typedef int (merge_request_fn) (request_queue_t *q, 
    5.57 +				struct request  *req,
    5.58 +				struct buffer_head *bh,
    5.59 +				int);
    5.60 +typedef int (merge_requests_fn) (request_queue_t *q, 
    5.61 +				 struct request  *req,
    5.62 +				 struct request  *req2,
    5.63 +				 int);
    5.64 +typedef void (request_fn_proc) (request_queue_t *q);
    5.65 +typedef request_queue_t * (queue_proc) (kdev_t dev);
    5.66 +typedef int (make_request_fn) (request_queue_t *q, int rw, struct buffer_head *bh);
    5.67 +typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
    5.68 +typedef void (unplug_device_fn) (void *q);
    5.69 +
    5.70 +struct request_list {
    5.71 +	unsigned int count;
    5.72 +	unsigned int pending[2];
    5.73 +	struct list_head free;
    5.74 +};
    5.75 +
    5.76 +struct request_queue
    5.77 +{
    5.78 +	/*
    5.79 +	 * the queue request freelist, one for reads and one for writes
    5.80 +	 */
    5.81 +	struct request_list	rq;
    5.82 +
    5.83 +	/*
    5.84 +	 * The total number of requests on each queue
    5.85 +	 */
    5.86 +	int nr_requests;
    5.87 +
    5.88 +	/*
    5.89 +	 * Batching threshold for sleep/wakeup decisions
    5.90 +	 */
    5.91 +	int batch_requests;
    5.92 +
    5.93 +	/*
    5.94 +	 * The total number of 512byte blocks on each queue
    5.95 +	 */
    5.96 +	atomic_t nr_sectors;
    5.97 +
    5.98 +	/*
    5.99 +	 * Batching threshold for sleep/wakeup decisions
   5.100 +	 */
   5.101 +	int batch_sectors;
   5.102 +
   5.103 +	/*
   5.104 +	 * The max number of 512byte blocks on each queue
   5.105 +	 */
   5.106 +	int max_queue_sectors;
   5.107 +
   5.108 +	/*
   5.109 +	 * Together with queue_head for cacheline sharing
   5.110 +	 */
   5.111 +	struct list_head	queue_head;
   5.112 +	elevator_t		elevator;
   5.113 +
   5.114 +	request_fn_proc		* request_fn;
   5.115 +	merge_request_fn	* back_merge_fn;
   5.116 +	merge_request_fn	* front_merge_fn;
   5.117 +	merge_requests_fn	* merge_requests_fn;
   5.118 +	make_request_fn		* make_request_fn;
   5.119 +	plug_device_fn		* plug_device_fn;
   5.120 +	/*
   5.121 +	 * The queue owner gets to use this for whatever they like.
   5.122 +	 * ll_rw_blk doesn't touch it.
   5.123 +	 */
   5.124 +	void			* queuedata;
   5.125 +
   5.126 +	/*
   5.127 +	 * This is used to remove the plug when tq_disk runs.
   5.128 +	 */
   5.129 +	struct tq_struct	plug_tq;
   5.130 +
   5.131 +	/*
   5.132 +	 * Boolean that indicates whether this queue is plugged or not.
   5.133 +	 */
   5.134 +	int			plugged:1;
   5.135 +
   5.136 +	/*
   5.137 +	 * Boolean that indicates whether current_request is active or
   5.138 +	 * not.
   5.139 +	 */
   5.140 +	int			head_active:1;
   5.141 +
   5.142 +	/*
   5.143 +	 * Boolean that indicates you will use blk_started_sectors
   5.144 +	 * and blk_finished_sectors in addition to blk_started_io
   5.145 +	 * and blk_finished_io.  It enables the throttling code to 
   5.146 +	 * help keep the sectors in flight to a reasonable value
   5.147 +	 */
   5.148 +	int			can_throttle:1;
   5.149 +
   5.150 +	unsigned long		bounce_pfn;
   5.151 +
   5.152 +	/*
   5.153 +	 * Is meant to protect the queue in the future instead of
   5.154 +	 * io_request_lock
   5.155 +	 */
   5.156 +	spinlock_t		queue_lock;
   5.157 +
   5.158 +	/*
   5.159 +	 * Tasks wait here for free read and write requests
   5.160 +	 */
   5.161 +	wait_queue_head_t	wait_for_requests;
   5.162 +};
   5.163 +
   5.164 +#define blk_queue_plugged(q)	(q)->plugged
   5.165 +#define blk_fs_request(rq)	((rq)->cmd == READ || (rq)->cmd == WRITE)
   5.166 +#define blk_queue_empty(q)	list_empty(&(q)->queue_head)
   5.167 +
   5.168 +extern inline int rq_data_dir(struct request *rq)
   5.169 +{
   5.170 +	if (rq->cmd == READ)
   5.171 +		return READ;
   5.172 +	else if (rq->cmd == WRITE)
   5.173 +		return WRITE;
   5.174 +	else {
   5.175 +		BUG();
   5.176 +		return -1; /* ahem */
   5.177 +	}
   5.178 +}
   5.179 +
   5.180 +extern unsigned long blk_max_low_pfn, blk_max_pfn;
   5.181 +
   5.182 +#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
   5.183 +#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
   5.184 +
   5.185 +extern void blk_queue_bounce_limit(request_queue_t *, u64);
   5.186 +
   5.187 +#ifdef CONFIG_HIGHMEM
   5.188 +extern struct buffer_head *create_bounce(int, struct buffer_head *);
   5.189 +extern inline struct buffer_head *blk_queue_bounce(request_queue_t *q, int rw,
   5.190 +						   struct buffer_head *bh)
   5.191 +{
   5.192 +	struct page *page = bh->b_page;
   5.193 +
   5.194 +#ifndef CONFIG_DISCONTIGMEM
   5.195 +	if (page - mem_map <= q->bounce_pfn)
   5.196 +#else
   5.197 +	if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT) <= q->bounce_pfn)
   5.198 +#endif
   5.199 +		return bh;
   5.200 +
   5.201 +	return create_bounce(rw, bh);
   5.202 +}
   5.203 +#else
   5.204 +#define blk_queue_bounce(q, rw, bh)	(bh)
   5.205 +#endif
   5.206 +
   5.207 +#ifdef CONFIG_XEN
   5.208 +/* Used for buffer merging, where it is imperative we use machine addresses! */
   5.209 +#define bh_phys(bh)		(page_to_bus((bh)->b_page) + bh_offset((bh)))
   5.210 +#else
   5.211 +#define bh_phys(bh)		(page_to_phys((bh)->b_page) + bh_offset((bh)))
   5.212 +#endif
   5.213 +
   5.214 +#define BH_CONTIG(b1, b2)	(bh_phys((b1)) + (b1)->b_size == bh_phys((b2)))
   5.215 +#define BH_PHYS_4G(b1, b2)	((bh_phys((b1)) | 0xffffffff) == ((bh_phys((b2)) + (b2)->b_size - 1) | 0xffffffff))
   5.216 +
   5.217 +struct blk_dev_struct {
   5.218 +	/*
   5.219 +	 * queue_proc has to be atomic
   5.220 +	 */
   5.221 +	request_queue_t		request_queue;
   5.222 +	queue_proc		*queue;
   5.223 +	void			*data;
   5.224 +};
   5.225 +
   5.226 +struct sec_size {
   5.227 +	unsigned block_size;
   5.228 +	unsigned block_size_bits;
   5.229 +};
   5.230 +
   5.231 +/*
   5.232 + * Used to indicate the default queue for drivers that don't bother
   5.233 + * to implement multiple queues.  We have this access macro here
   5.234 + * so as to eliminate the need for each and every block device
   5.235 + * driver to know about the internal structure of blk_dev[].
   5.236 + */
   5.237 +#define BLK_DEFAULT_QUEUE(_MAJOR)  &blk_dev[_MAJOR].request_queue
   5.238 +
   5.239 +extern struct sec_size * blk_sec[MAX_BLKDEV];
   5.240 +extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
   5.241 +extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
   5.242 +extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
   5.243 +extern void generic_make_request(int rw, struct buffer_head * bh);
   5.244 +extern inline request_queue_t *blk_get_queue(kdev_t dev);
   5.245 +extern void blkdev_release_request(struct request *);
   5.246 +
   5.247 +/*
   5.248 + * Access functions for manipulating queue properties
   5.249 + */
   5.250 +extern int blk_grow_request_list(request_queue_t *q, int nr_requests, int max_queue_sectors);
   5.251 +extern void blk_init_queue(request_queue_t *, request_fn_proc *);
   5.252 +extern void blk_cleanup_queue(request_queue_t *);
   5.253 +extern void blk_queue_headactive(request_queue_t *, int);
   5.254 +extern void blk_queue_throttle_sectors(request_queue_t *, int);
   5.255 +extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
   5.256 +extern void generic_unplug_device(void *);
   5.257 +extern inline int blk_seg_merge_ok(struct buffer_head *, struct buffer_head *);
   5.258 +
   5.259 +extern int * blk_size[MAX_BLKDEV];
   5.260 +
   5.261 +extern int * blksize_size[MAX_BLKDEV];
   5.262 +
   5.263 +extern int * hardsect_size[MAX_BLKDEV];
   5.264 +
   5.265 +extern int * max_readahead[MAX_BLKDEV];
   5.266 +
   5.267 +extern int * max_sectors[MAX_BLKDEV];
   5.268 +
   5.269 +extern int * max_segments[MAX_BLKDEV];
   5.270 +
   5.271 +#define MAX_SEGMENTS 128
   5.272 +#define MAX_SECTORS 255
   5.273 +#define MAX_QUEUE_SECTORS (4 << (20 - 9)) /* 4 mbytes when full sized */
   5.274 +#define MAX_NR_REQUESTS 1024 /* 1024k when in 512 units, normally min is 1M in 1k units */
   5.275 +
   5.276 +#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)
   5.277 +
   5.278 +#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queue)
   5.279 +#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
   5.280 +#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
   5.281 +#define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next)
   5.282 +#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev)
   5.283 +
   5.284 +extern void drive_stat_acct (kdev_t dev, int rw,
   5.285 +					unsigned long nr_sectors, int new_io);
   5.286 +
   5.287 +static inline int get_hardsect_size(kdev_t dev)
   5.288 +{
   5.289 +	int retval = 512;
   5.290 +	int major = MAJOR(dev);
   5.291 +
   5.292 +	if (hardsect_size[major]) {
   5.293 +		int minor = MINOR(dev);
   5.294 +		if (hardsect_size[major][minor])
   5.295 +			retval = hardsect_size[major][minor];
   5.296 +	}
   5.297 +	return retval;
   5.298 +}
   5.299 +
   5.300 +static inline int blk_oversized_queue(request_queue_t * q)
   5.301 +{
   5.302 +	if (q->can_throttle)
   5.303 +		return atomic_read(&q->nr_sectors) > q->max_queue_sectors;
   5.304 +	return q->rq.count == 0;
   5.305 +}
   5.306 +
   5.307 +static inline int blk_oversized_queue_reads(request_queue_t * q)
   5.308 +{
   5.309 +	if (q->can_throttle)
   5.310 +		return atomic_read(&q->nr_sectors) > q->max_queue_sectors + q->batch_sectors;
   5.311 +	return q->rq.count == 0;
   5.312 +}
   5.313 +
   5.314 +static inline int blk_oversized_queue_batch(request_queue_t * q)
   5.315 +{
   5.316 +	return atomic_read(&q->nr_sectors) > q->max_queue_sectors - q->batch_sectors;
   5.317 +}
   5.318 +
   5.319 +#define blk_finished_io(nsects)	do { } while (0)
   5.320 +#define blk_started_io(nsects)	do { } while (0)
   5.321 +
   5.322 +static inline void blk_started_sectors(struct request *rq, int count)
   5.323 +{
   5.324 +	request_queue_t *q = rq->q;
   5.325 +	if (q && q->can_throttle) {
   5.326 +		atomic_add(count, &q->nr_sectors);
   5.327 +		if (atomic_read(&q->nr_sectors) < 0) {
   5.328 +			printk("nr_sectors is %d\n", atomic_read(&q->nr_sectors));
   5.329 +			BUG();
   5.330 +		}
   5.331 +	}
   5.332 +}
   5.333 +
   5.334 +static inline void blk_finished_sectors(struct request *rq, int count)
   5.335 +{
   5.336 +	request_queue_t *q = rq->q;
   5.337 +	if (q && q->can_throttle) {
   5.338 +		atomic_sub(count, &q->nr_sectors);
   5.339 +		
   5.340 +		smp_mb();
   5.341 +		if (q->rq.count >= q->batch_requests && !blk_oversized_queue_batch(q)) {
   5.342 +			if (waitqueue_active(&q->wait_for_requests))
   5.343 +				wake_up(&q->wait_for_requests);
   5.344 +		}
   5.345 +		if (atomic_read(&q->nr_sectors) < 0) {
   5.346 +			printk("nr_sectors is %d\n", atomic_read(&q->nr_sectors));
   5.347 +			BUG();
   5.348 +		}
   5.349 +	}
   5.350 +}
   5.351 +
   5.352 +static inline unsigned int blksize_bits(unsigned int size)
   5.353 +{
   5.354 +	unsigned int bits = 8;
   5.355 +	do {
   5.356 +		bits++;
   5.357 +		size >>= 1;
   5.358 +	} while (size > 256);
   5.359 +	return bits;
   5.360 +}
   5.361 +
   5.362 +static inline unsigned int block_size(kdev_t dev)
   5.363 +{
   5.364 +	int retval = BLOCK_SIZE;
   5.365 +	int major = MAJOR(dev);
   5.366 +
   5.367 +	if (blksize_size[major]) {
   5.368 +		int minor = MINOR(dev);
   5.369 +		if (blksize_size[major][minor])
   5.370 +			retval = blksize_size[major][minor];
   5.371 +	}
   5.372 +	return retval;
   5.373 +}
   5.374 +
   5.375 +#endif
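
The new blkdev.h is a copy of the stock 2.4.26 block-layer header added to the sparse tree; the evident reason for carrying it is the CONFIG_XEN branch of bh_phys(), which resolves to page_to_bus() so that the BH_CONTIG() and BH_PHYS_4G() merge tests compare machine addresses rather than pseudo-physical ones. The header also carries the request-throttling machinery: a queue that enables throttling via blk_queue_throttle_sectors() must bracket each request's I/O with blk_started_sectors()/blk_finished_sectors() so that blk_oversized_queue() can bound the sectors in flight. A hedged sketch of that bracketing in a driver; the end_that_request_*() calls are assumptions from the stock 2.4 block layer, not part of this changeset:

	/* Hedged sketch: pairing the throttling hooks around one request.
	 * end_that_request_first()/_last() are assumed from stock 2.4. */
	static void example_issue(struct request *rq)
	{
		blk_started_io(rq->nr_sectors);			 /* compiled out above */
		blk_started_sectors(rq, rq->current_nr_sectors); /* adds to q->nr_sectors */
	}

	static void example_complete(struct request *rq, int uptodate)
	{
		int count = rq->current_nr_sectors;

		blk_finished_sectors(rq, count);	/* may wake wait_for_requests */
		if (!end_that_request_first(rq, uptodate, "example"))
			end_that_request_last(rq);
	}

Note how blk_finished_sectors() issues an smp_mb() and re-checks both the free-request count and blk_oversized_queue_batch() before waking sleepers, so tasks blocked in the request allocator are only woken once a full batch of room is available.
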