ia64/xen-unstable
changeset 233:4c0c962a443f
bitkeeper revision 1.91 (3e5a3727Dlhfakt5fPHI3hlx8R377A)
blkdev.h, xen_block.c:
  Sanity checking for blkdev ring arguments.
| author   | kaf24@labyrinth.cl.cam.ac.uk |
|----------|------------------------------|
| date     | Mon Feb 24 15:15:51 2003 +0000 (2003-02-24) |
| parents  | f73ef0280d7e |
| children | 682fc9ed30dc |
| files    | xen-2.4.16/drivers/block/xen_block.c xen-2.4.16/include/xeno/blkdev.h |
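The new checks in this changeset validate guest-supplied ring descriptors instead of panicking on them, and they lean on the usual power-of-two mask trick: a value is a multiple of 512 exactly when its low nine bits are clear. A minimal standalone sketch of that test (the `sector_aligned` helper is illustrative, not part of this tree):

```c
#include <stdio.h>

/* 512-byte sectors: x is sector-aligned iff its low 9 bits are zero,
 * i.e. (x & (0x200 - 1)) == 0. Note the mask alone accepts x == 0,
 * which is why the patch also rejects a zero block_size explicitly. */
static int sector_aligned(unsigned long x)
{
    return (x & (0x200 - 1)) == 0;
}

int main(void)
{
    printf("%d %d %d\n",
           sector_aligned(1024),  /* 1 */
           sector_aligned(1000),  /* 0 */
           sector_aligned(0));    /* 1: hence the separate size == 0 check */
    return 0;
}
```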
line diff
```diff
--- a/xen-2.4.16/drivers/block/xen_block.c	Mon Feb 24 14:19:58 2003 +0000
+++ b/xen-2.4.16/drivers/block/xen_block.c	Mon Feb 24 15:15:51 2003 +0000
@@ -17,7 +17,7 @@
 #include <xeno/keyhandler.h>
 #include <xeno/interrupt.h>
 
-#if 0
+#if 1
 #define DPRINTK(_f, _a...) printk( _f , ## _a )
 #else
 #define DPRINTK(_f, _a...) ((void)0)
@@ -133,6 +133,21 @@ static void maybe_trigger_io_schedule(vo
 
 static void end_block_io_op(struct buffer_head *bh, int uptodate)
 {
+    struct pfn_info *page;
+    unsigned long pfn;
+
+    for ( pfn = virt_to_phys(bh->b_data) >> PAGE_SHIFT;
+          pfn < ((virt_to_phys(bh->b_data) + bh->b_size + PAGE_SIZE - 1) >>
+                 PAGE_SHIFT);
+          pfn++ )
+    {
+        page = frame_table + pfn;
+        if ( ((bh->b_state & (1 << BH_Read)) != 0) &&
+             (put_page_type(page) == 0) )
+            page->flags &= ~PG_type_mask;
+        put_page_tot(page);
+    }
+
     atomic_dec(&nr_pending);
     make_response(bh->b_xen_domain, bh->b_xen_id, uptodate ? 0 : 1);
 
@@ -223,22 +238,66 @@ static void dispatch_rw_block_io(struct
     blk_ring_t *blk_ring = p->blk_ring_base;
     struct buffer_head *bh;
     int operation;
-
-    /*
-     * check to make sure that the block request seems at least
-     * a bit legitimate
-     */
-    if ( (blk_ring->ring[index].req.block_size & (0x200 - 1)) != 0 )
-        panic("error: dodgy block size: %d\n",
-              blk_ring->ring[index].req.block_size);
-
-    if ( blk_ring->ring[index].req.buffer == NULL )
-        panic("xen_block: bogus buffer from guestOS\n");
+    unsigned short size;
+    unsigned long buffer, pfn;
+    struct pfn_info *page;
+
+    operation = (blk_ring->ring[index].req.operation == XEN_BLOCK_WRITE) ?
+        WRITE : READ;
+
+    /* Sectors are 512 bytes. Make sure request size is a multiple. */
+    size = blk_ring->ring[index].req.block_size;
+    if ( (size == 0) || (size & (0x200 - 1)) != 0 )
+    {
+        DPRINTK("dodgy block size: %d\n",
+                blk_ring->ring[index].req.block_size);
+        goto bad_descriptor;
+    }
+
+    /* Buffer address should be sector aligned. */
+    buffer = (unsigned long)blk_ring->ring[index].req.buffer;
+    if ( (buffer & (0x200 - 1)) != 0 )
+    {
+        DPRINTK("unaligned buffer %08lx\n", buffer);
+        goto bad_descriptor;
+    }
 
-    DPRINTK("req_cons: %d req_prod %d index: %d, op: %s\n",
-            p->blk_req_cons, blk_ring->req_prod, index,
-            (blk_ring->ring[index].req.operation == XEN_BLOCK_READ ?
-             "read" : "write"));
+    /* A request may span multiple page frames. Each must be checked. */
+    for ( pfn = buffer >> PAGE_SHIFT;
+          pfn < ((buffer + size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+          pfn++ )
+    {
+        /* Each frame must be within bounds of machine memory. */
+        if ( pfn >= max_page )
+        {
+            DPRINTK("pfn out of range: %08lx\n", pfn);
+            goto bad_descriptor;
+        }
+
+        page = frame_table + pfn;
+
+        /* Each frame must belong to the requesting domain. */
+        if ( (page->flags & PG_domain_mask) != p->domain )
+        {
+            DPRINTK("bad domain: expected %d, got %ld\n",
+                    p->domain, page->flags & PG_domain_mask);
+            goto bad_descriptor;
+        }
+
+        /* If reading into the frame, the frame must be writeable. */
+        if ( operation == READ )
+        {
+            if ( (page->flags & PG_type_mask) != PGT_writeable_page )
+            {
+                DPRINTK("non-writeable page passed for block read\n");
+                goto bad_descriptor;
+            }
+            get_page_type(page);
+        }
+
+        /* Xen holds a frame reference until the operation is complete. */
+        get_page_tot(page);
+    }
 
     atomic_inc(&nr_pending);
     bh = kmem_cache_alloc(buffer_head_cachep, GFP_KERNEL);
@@ -248,11 +307,10 @@ static void dispatch_rw_block_io(struct
     memset (bh, 0, sizeof (struct buffer_head));
 
     bh->b_blocknr = blk_ring->ring[index].req.block_number;
-    bh->b_size = blk_ring->ring[index].req.block_size;
+    bh->b_size = size;
     bh->b_dev = blk_ring->ring[index].req.device;
     bh->b_rsector = blk_ring->ring[index].req.sector_number;
-    bh->b_data = phys_to_virt((unsigned long)
-                              blk_ring->ring[index].req.buffer);
+    bh->b_data = phys_to_virt(buffer);
     bh->b_count.counter = 1;
     bh->b_end_io = end_block_io_op;
 
@@ -260,20 +318,23 @@ static void dispatch_rw_block_io(struct
     bh->b_xen_domain = p;
     bh->b_xen_id = blk_ring->ring[index].req.id;
 
-    if ( blk_ring->ring[index].req.operation == XEN_BLOCK_WRITE )
+    if ( operation == WRITE )
     {
-        bh->b_state = ((1 << BH_JBD) | (1 << BH_Mapped) | (1 << BH_Req) |
-                       (1 << BH_Dirty) | (1 << BH_Uptodate));
-        operation = WRITE;
+        bh->b_state = (1 << BH_JBD) | (1 << BH_Mapped) | (1 << BH_Req) |
+                      (1 << BH_Dirty) | (1 << BH_Uptodate) | (1 << BH_Write);
     }
     else
    {
-        bh->b_state = (1 << BH_Mapped);
-        operation = READ;
+        bh->b_state = (1 << BH_Mapped) | (1 << BH_Read);
     }
 
     /* Dispatch a single request. We'll flush it to disc later. */
     ll_rw_block(operation, 1, &bh);
+    return;
+
+ bad_descriptor:
+    make_response(p, blk_ring->ring[index].req.id, 1);
+    return;
 }
```
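The same frame-spanning arithmetic appears twice in this file: once when validating the descriptor in dispatch_rw_block_io, and again when dropping the references in end_block_io_op. A minimal sketch of the round-up loop bound, assuming 4 KB pages (the `for_each_frame` helper and its printf are illustrative only, not Xen code):

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Visit every page frame touched by a buffer of `size` bytes starting
 * at machine address `buffer`. The upper bound rounds up by adding
 * PAGE_SIZE - 1 before shifting, so a request that ends mid-page still
 * covers its final frame. */
static void for_each_frame(unsigned long buffer, unsigned long size)
{
    unsigned long pfn;
    for ( pfn = buffer >> PAGE_SHIFT;
          pfn < ((buffer + size + PAGE_SIZE - 1) >> PAGE_SHIFT);
          pfn++ )
        printf("touches pfn %#lx\n", pfn);
}

int main(void)
{
    /* A 4 KB request starting 512 bytes into a page spans two frames,
     * so both frames must pass the domain and type checks. */
    for_each_frame(0x10200, 0x1000);
    return 0;
}
```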
```diff
--- a/xen-2.4.16/include/xeno/blkdev.h	Mon Feb 24 14:19:58 2003 +0000
+++ b/xen-2.4.16/include/xeno/blkdev.h	Mon Feb 24 15:15:51 2003 +0000
@@ -45,7 +45,6 @@ struct block_device_operations {
 };
 
 
-/*** BUFFER_HEAD stuff: maybe this will die, or live on in reduced form */
 enum bh_state_bits {
     BH_Uptodate,    /* 1 if the buffer contains valid data */
     BH_Dirty,       /* 1 if the buffer is dirty */
@@ -57,10 +56,8 @@ enum bh_state_bits {
     BH_Wait_IO,     /* 1 if we should write out this buffer */
     BH_Launder,     /* 1 if we can throttle on this buffer */
     BH_JBD,         /* 1 if it has an attached journal_head */
-
-    BH_PrivateStart,/* not a state bit, but the first bit available
-                     * for private allocation by other entities
-                     */
+    BH_Read,        /* 1 if request is a read from disc */
+    BH_Write        /* 1 if request is a write to disc */
 };
 
 struct buffer_head {
```
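For illustration, the two new state bits work as a tag-and-test pair: dispatch_rw_block_io tags the buffer head with BH_Read or BH_Write, and end_block_io_op tests BH_Read to decide whether a writeable-page type reference must be dropped. A pared-down sketch with stub types, not the real structures from blkdev.h:

```c
#include <stdio.h>

/* Pared-down stand-ins for the bh_state_bits additions; the real enum
 * has many more members before these two. */
enum bh_state_bits { BH_Mapped, BH_Read, BH_Write };

struct buffer_head { unsigned long b_state; };

int main(void)
{
    struct buffer_head bh;

    /* Dispatch path: a read request is tagged with BH_Read so the
     * completion path can tell reads and writes apart. */
    bh.b_state = (1 << BH_Mapped) | (1 << BH_Read);

    /* Completion path: only reads took a writeable-page type ref. */
    if ( (bh.b_state & (1 << BH_Read)) != 0 )
        printf("read completed: drop the page type reference\n");
    return 0;
}
```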