direct-io.hg
changeset 2205:0213aef0e364
bitkeeper revision 1.1159.1.44 (411b841bXblyvUBac_A9DfW5Bt7QGw)
Merge http://xen.bkbits.net:8080/xeno-unstable.bk
into gandalf.hpl.hp.com:/var/bk/xeno-unstable.bk
author   | xenbk@gandalf.hpl.hp.com
date     | Thu Aug 12 14:52:11 2004 +0000 (2004-08-12)
parents  | 294fd489740a be6e22d4e208
children | 0667ac4c62f5
files    | linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/main.c linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c xen/arch/x86/memory.c
line diff
--- a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/main.c   Thu Aug 12 14:44:06 2004 +0000
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/main.c   Thu Aug 12 14:52:11 2004 +0000
@@ -61,30 +61,34 @@ static unsigned long sg_next_sect;
 #define DISABLE_SCATTERGATHER() (sg_operation = -1)
 
 
-inline void translate_req_to_pfn( blkif_request_t * xreq, blkif_request_t * req)
+static inline void translate_req_to_pfn(blkif_request_t *xreq,
+                                        blkif_request_t *req)
 {
     int i;
 
-    *xreq=*req;
-    for ( i=0; i<req->nr_segments; i++ )
-    {
-        xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
-            (machine_to_phys_mapping[req->frame_and_sects[i]>>PAGE_SHIFT]<<PAGE_SHIFT);
+    *xreq = *req;
+
+    for ( i = 0; i < req->nr_segments; i++ )
+    {
+        xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
+            (machine_to_phys_mapping[req->frame_and_sects[i] >> PAGE_SHIFT] <<
+             PAGE_SHIFT);
     }
-    return xreq;
 }
 
-inline void translate_req_to_mfn( blkif_request_t * xreq, blkif_request_t * req)
+static inline void translate_req_to_mfn(blkif_request_t *xreq,
+                                        blkif_request_t *req)
 {
     int i;
 
-    *xreq=*req;
-    for ( i=0; i<req->nr_segments; i++ )
-    {
-        xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
-            (phys_to_machine_mapping[req->frame_and_sects[i]>>PAGE_SHIFT]<<PAGE_SHIFT);
+    *xreq = *req;
+
+    for ( i = 0; i < req->nr_segments; i++ )
+    {
+        xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
+            (phys_to_machine_mapping[req->frame_and_sects[i] >> PAGE_SHIFT] <<
+             PAGE_SHIFT);
     }
-    return xreq;
 }
 
 
@@ -170,7 +174,7 @@ int blkif_release(struct inode *inode, s
 
 
 int blkif_ioctl(struct inode *inode, struct file *filep,
-                unsigned command, unsigned long argument)
+                unsigned command, unsigned long argument)
 {
     kdev_t dev = inode->i_rdev;
     struct hd_geometry *geo = (struct hd_geometry *)argument;
@@ -392,8 +396,8 @@ static int blkif_queue_request(unsigned
         DISABLE_SCATTERGATHER();
 
         /* Update the copy of the request in the recovery ring. */
-        translate_req_to_pfn(&blk_ring_rec->ring[
-            MASK_BLKIF_IDX(blk_ring_rec->req_prod - 1)].req, req);
+        translate_req_to_pfn(&blk_ring_rec->ring[
+            MASK_BLKIF_IDX(blk_ring_rec->req_prod - 1)].req, req);
 
         return 0;
     }
@@ -425,7 +429,7 @@ static int blkif_queue_request(unsigned
 
     /* Keep a private copy so we can reissue requests when recovering. */
     translate_req_to_pfn(&blk_ring_rec->ring[
-        MASK_BLKIF_IDX(blk_ring_rec->req_prod)].req, req);
+        MASK_BLKIF_IDX(blk_ring_rec->req_prod)].req, req);
     blk_ring_rec->req_prod++;
 
     return 0;
@@ -602,7 +606,7 @@ void blkif_control_send(blkif_request_t
     blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req = *req;
 
     translate_req_to_pfn(&blk_ring_rec->ring[
-        MASK_BLKIF_IDX(blk_ring_rec->req_prod++)].req,req);
+        MASK_BLKIF_IDX(blk_ring_rec->req_prod++)].req,req);
 
     req_prod++;
     flush_requests();
@@ -693,27 +697,29 @@ static void blkif_status_change(blkif_fe
 {
     int i,j;
 
-    /* Shouldn't need the io_request_lock here - the device is
-     * plugged and the recovery flag prevents the interrupt handler
-     * changing anything. */
+    /*
+     * Shouldn't need the io_request_lock here - the device is plugged
+     * and the recovery flag prevents the interrupt handler changing
+     * anything.
+     */
 
     /* Reissue requests from the private block ring. */
     for ( i = 0;
-          resp_cons_rec < blk_ring_rec->req_prod;
+          resp_cons_rec < blk_ring_rec->req_prod;
          resp_cons_rec++, i++ )
    {
-        translate_req_to_mfn(&blk_ring->ring[i].req,
-                             &blk_ring_rec->ring[
-                                 MASK_BLKIF_IDX(resp_cons_rec)].req);
+        translate_req_to_mfn(
+            &blk_ring->ring[i].req,
+            &blk_ring_rec->ring[MASK_BLKIF_IDX(resp_cons_rec)].req);
    }
 
-    /* Reset the private block ring to match the new ring. */
-    for( j=0; j<i; j++ )
-    {
-        translate_req_to_pfn(
-            &blk_ring_rec->ring[j].req,
-            &blk_ring->ring[j].req);
-    }
+    /* Reset the private block ring to match the new ring. */
+    for ( j = 0; j < i; j++ )
+    {
+        translate_req_to_pfn(
+            &blk_ring_rec->ring[j].req,
+            &blk_ring->ring[j].req);
+    }
 
    resp_cons_rec = 0;
 
@@ -788,7 +794,7 @@ int __init xlblk_init(void)
    blkif_fe_driver_status_changed_t st;
 
    if ( (start_info.flags & SIF_INITDOMAIN)
-        || (start_info.flags & SIF_BLK_BE_DOMAIN) )
+         || (start_info.flags & SIF_BLK_BE_DOMAIN) )
        return 0;
 
    printk(KERN_INFO "Initialising Xen virtual block device\n");
@@ -821,25 +827,10 @@ int __init xlblk_init(void)
    return 0;
 }
 
-
-static void __exit xlblk_cleanup(void)
-{
-    /* XXX FIXME */
-    BUG();
-}
-
-
-#ifdef MODULE
-module_init(xlblk_init);
-module_exit(xlblk_cleanup);
-#endif
-
-
 void blkdev_suspend(void)
 {
 }
 
-
 void blkdev_resume(void)
 {
    ctrl_msg_t cmsg;
@@ -853,4 +844,3 @@ void blkdev_resume(void)
    memcpy(cmsg.msg, &st, sizeof(st));
    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
 }
-
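Both frontends' translate_req_to_pfn()/translate_req_to_mfn() helpers do the same thing to every frame_and_sects entry: keep the low sector bits and push the frame number through one of Xen's two mapping tables. The fragment below is a minimal, standalone sketch of that bit manipulation, not the driver code itself; the tiny mock mapping arrays, the helper name translate_entry() and the 12-bit page size are assumptions made purely for illustration.

/*
 * Standalone illustration of the frame-number rewrite performed on each
 * frame_and_sects entry.  The mapping tables here are tiny mock arrays;
 * in the real driver they are machine_to_phys_mapping and
 * phys_to_machine_mapping provided by Xen.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Mock translation tables indexed by frame number (assumption). */
static unsigned long machine_to_phys[16];
static unsigned long phys_to_machine[16];

/* Swap the frame number in a frame_and_sects entry while keeping the
 * sector bits (the low 12 bits) untouched. */
static unsigned long translate_entry(unsigned long entry,
                                     const unsigned long *map)
{
    return (entry & ~PAGE_MASK) | (map[entry >> PAGE_SHIFT] << PAGE_SHIFT);
}

int main(void)
{
    unsigned long fsect = 0, lsect = 7;
    unsigned long mfn = 5, pfn = 9;

    machine_to_phys[mfn] = pfn;
    phys_to_machine[pfn] = mfn;

    /* Machine address of the frame plus first/last sector, as built by
     * blkif_queue_request(). */
    unsigned long entry = (mfn << PAGE_SHIFT) | (fsect << 3) | lsect;

    unsigned long as_pfn = translate_entry(entry, machine_to_phys);
    unsigned long back   = translate_entry(as_pfn, phys_to_machine);

    printf("mfn-based: %#lx  pfn-based: %#lx  round-trip: %#lx\n",
           entry, as_pfn, back);
    return 0;
}

The round trip through both tables returns the original entry, which is what the recovery path relies on when it converts the saved copies back with translate_req_to_mfn() before reissuing them.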
--- a/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c   Thu Aug 12 14:44:06 2004 +0000
+++ b/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c   Thu Aug 12 14:52:11 2004 +0000
@@ -38,8 +38,8 @@ static int recovery = 0; /* "R
  * by the blkif_io_lock */
 
 /* We plug the I/O ring if the driver is suspended or if the ring is full. */
-#define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
-                         (blkif_state != BLKIF_STATE_CONNECTED))
+#define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
+                         (blkif_state != BLKIF_STATE_CONNECTED))
 
 /*
  * Request queues with outstanding work, but ring is currently full.
@@ -50,38 +50,40 @@ static int recovery = 0; /* "R
 static request_queue_t *pending_queues[MAX_PENDING];
 static int nr_pending;
 
-inline void translate_req_to_pfn( blkif_request_t * xreq, blkif_request_t * req)
+static inline void translate_req_to_pfn(blkif_request_t *xreq,
+                                        blkif_request_t *req)
 {
     int i;
 
-    *xreq=*req;
-    for ( i=0; i<req->nr_segments; i++ )
-    {
-        xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
-            (machine_to_phys_mapping[req->frame_and_sects[i]>>PAGE_SHIFT]<<PAGE_SHIFT);
+    *xreq = *req;
+
+    for ( i = 0; i < req->nr_segments; i++ )
+    {
+        xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
+            (machine_to_phys_mapping[req->frame_and_sects[i] >> PAGE_SHIFT] <<
+             PAGE_SHIFT);
    }
-    return xreq;
 }
 
-inline void translate_req_to_mfn( blkif_request_t * xreq, blkif_request_t * req)
+static inline void translate_req_to_mfn(blkif_request_t *xreq,
+                                        blkif_request_t *req)
 {
     int i;
 
-    *xreq=*req;
-    for ( i=0; i<req->nr_segments; i++ )
-    {
-        xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
-            (phys_to_machine_mapping[req->frame_and_sects[i]>>PAGE_SHIFT]<<PAGE_SHIFT);
+    *xreq = *req;
+
+    for ( i = 0; i < req->nr_segments; i++ )
+    {
+        xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
+            (phys_to_machine_mapping[req->frame_and_sects[i] >> PAGE_SHIFT] <<
+             PAGE_SHIFT);
    }
-    return xreq;
 }
 
 static inline void flush_requests(void)
 {
-
-    blk_ring->req_prod = req_prod;
-
-    notify_via_evtchn(blkif_evtchn);
+    blk_ring->req_prod = req_prod;
+    notify_via_evtchn(blkif_evtchn);
 }
 
 
@@ -101,57 +103,57 @@ static void update_vbds_task(void *unuse
 
 int blkif_open(struct inode *inode, struct file *filep)
 {
-    struct gendisk *gd = inode->i_bdev->bd_disk;
-    struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+    struct gendisk *gd = inode->i_bdev->bd_disk;
+    struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
 
-    /* Update of usage count is protected by per-device semaphore. */
-    di->mi->usage++;
-
-    return 0;
+    /* Update of usage count is protected by per-device semaphore. */
+    di->mi->usage++;
+
+    return 0;
 }
 
 
 int blkif_release(struct inode *inode, struct file *filep)
 {
-    struct gendisk *gd = inode->i_bdev->bd_disk;
-    struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+    struct gendisk *gd = inode->i_bdev->bd_disk;
+    struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
 
-    /*
-     * When usage drops to zero it may allow more VBD updates to occur.
-     * Update of usage count is protected by a per-device semaphore.
-     */
-    if (--di->mi->usage == 0) {
+    /*
+     * When usage drops to zero it may allow more VBD updates to occur.
+     * Update of usage count is protected by a per-device semaphore.
+     */
+    if (--di->mi->usage == 0) {
 #if 0
-        update_tq.routine = update_vbds_task;
-        schedule_task(&update_tq);
+        update_tq.routine = update_vbds_task;
+        schedule_task(&update_tq);
 #endif
-    }
+    }
 
-    return 0;
+    return 0;
 }
 
 
 int blkif_ioctl(struct inode *inode, struct file *filep,
-                unsigned command, unsigned long argument)
+                unsigned command, unsigned long argument)
 {
-    /* struct gendisk *gd = inode->i_bdev->bd_disk; */
+    /* struct gendisk *gd = inode->i_bdev->bd_disk; */
 
-    DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
-                  command, (long)argument, inode->i_rdev);
+    DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
+                  command, (long)argument, inode->i_rdev);
 
-    switch (command) {
+    switch (command) {
 
-    case HDIO_GETGEO:
-        /* return ENOSYS to use defaults */
-        return -ENOSYS;
+    case HDIO_GETGEO:
+        /* return ENOSYS to use defaults */
+        return -ENOSYS;
 
-    default:
-        printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
-               command);
-        return -ENOSYS;
-    }
+    default:
+        printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
+               command);
+        return -ENOSYS;
+    }
 
-    return 0;
+    return 0;
 }
 
 #if 0
@@ -227,55 +229,55 @@ int blkif_revalidate(kdev_t dev)
  */
 static int blkif_queue_request(struct request *req)
 {
-    struct xlbd_disk_info *di =
-        (struct xlbd_disk_info *)req->rq_disk->private_data;
-    unsigned long buffer_ma;
-    blkif_request_t *ring_req;
-    struct bio *bio;
-    struct bio_vec *bvec;
-    int idx, s;
-    unsigned int fsect, lsect;
+    struct xlbd_disk_info *di =
+        (struct xlbd_disk_info *)req->rq_disk->private_data;
+    unsigned long buffer_ma;
+    blkif_request_t *ring_req;
+    struct bio *bio;
+    struct bio_vec *bvec;
+    int idx, s;
+    unsigned int fsect, lsect;
 
-    if (unlikely(blkif_state != BLKIF_STATE_CONNECTED))
-        return 1;
+    if (unlikely(blkif_state != BLKIF_STATE_CONNECTED))
+        return 1;
 
-    /* Fill out a communications ring structure. */
-    ring_req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
-    ring_req->id = (unsigned long)req;
-    ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
-        BLKIF_OP_READ;
-    ring_req->sector_number = (blkif_sector_t)req->sector;
-    ring_req->device = di->xd_device;
+    /* Fill out a communications ring structure. */
+    ring_req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
+    ring_req->id = (unsigned long)req;
+    ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
+        BLKIF_OP_READ;
+    ring_req->sector_number = (blkif_sector_t)req->sector;
+    ring_req->device = di->xd_device;
 
-    s = 0;
-    ring_req->nr_segments = 0;
-    rq_for_each_bio(bio, req) {
-        bio_for_each_segment(bvec, bio, idx) {
-            buffer_ma = page_to_phys(bvec->bv_page);
-            if (unlikely((buffer_ma & ((1<<9)-1)) != 0))
-                BUG();
+    s = 0;
+    ring_req->nr_segments = 0;
+    rq_for_each_bio(bio, req) {
+        bio_for_each_segment(bvec, bio, idx) {
+            buffer_ma = page_to_phys(bvec->bv_page);
+            if (unlikely((buffer_ma & ((1<<9)-1)) != 0))
+                BUG();
 
-            fsect = bvec->bv_offset >> 9;
-            lsect = fsect + (bvec->bv_len >> 9) - 1;
-            if (unlikely(lsect > 7))
-                BUG();
+            fsect = bvec->bv_offset >> 9;
+            lsect = fsect + (bvec->bv_len >> 9) - 1;
+            if (unlikely(lsect > 7))
+                BUG();
 
-            ring_req->frame_and_sects[ring_req->nr_segments++] =
-                buffer_ma | (fsect << 3) | lsect;
-            s += bvec->bv_len >> 9;
-        }
-    }
+            ring_req->frame_and_sects[ring_req->nr_segments++] =
+                buffer_ma | (fsect << 3) | lsect;
+            s += bvec->bv_len >> 9;
+        }
+    }
 
-    req_prod++;
+    req_prod++;
 
-    /* Keep a private copy so we can reissue requests when recovering. */
-    translate_req_to_pfn(
-        &blk_ring_rec->ring[MASK_BLKIF_IDX(blk_ring_rec->req_prod)].req,
-        ring_req);
+    /* Keep a private copy so we can reissue requests when recovering. */
+    translate_req_to_pfn(
+        &blk_ring_rec->ring[MASK_BLKIF_IDX(blk_ring_rec->req_prod)].req,
+        ring_req);
 
-    blk_ring_rec->req_prod++;
+    blk_ring_rec->req_prod++;
 
-    return 0;
+    return 0;
 }
 
 /*
@@ -284,37 +286,37 @@ static int blkif_queue_request(struct re
  */
 void do_blkif_request(request_queue_t *rq)
 {
-    struct request *req;
-    int queued;
+    struct request *req;
+    int queued;
 
-    DPRINTK("Entered do_blkif_request\n");
+    DPRINTK("Entered do_blkif_request\n");
 
-    queued = 0;
+    queued = 0;
 
-    while ((req = elv_next_request(rq)) != NULL) {
-        if (!blk_fs_request(req)) {
-            end_request(req, 0);
-            continue;
-        }
+    while ((req = elv_next_request(rq)) != NULL) {
+        if (!blk_fs_request(req)) {
+            end_request(req, 0);
+            continue;
+        }
 
-        if (BLKIF_RING_FULL) {
-            blk_stop_queue(rq);
-            break;
-        }
-        DPRINTK("do_blkif_request %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
-                req, req->cmd, req->sector, req->current_nr_sectors,
-                req->nr_sectors, req->buffer,
-                rq_data_dir(req) ? "write" : "read");
-        blkdev_dequeue_request(req);
-        if (blkif_queue_request(req)) {
-            blk_stop_queue(rq);
-            break;
-        }
-        queued++;
-    }
+        if (BLKIF_RING_FULL) {
+            blk_stop_queue(rq);
+            break;
+        }
+        DPRINTK("do_blkif_request %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
+                req, req->cmd, req->sector, req->current_nr_sectors,
+                req->nr_sectors, req->buffer,
+                rq_data_dir(req) ? "write" : "read");
+        blkdev_dequeue_request(req);
+        if (blkif_queue_request(req)) {
+            blk_stop_queue(rq);
+            break;
+        }
+        queued++;
+    }
 
-    if (queued != 0)
-        flush_requests();
+    if (queued != 0)
+        flush_requests();
 }
 
 
@@ -338,82 +340,82 @@ THIS CODE SHOULD BE REMOVED WHEN WE HAVE
 static void blkif_completion(blkif_response_t *bret, struct request *req)
 {
 #if 0
-    struct bio *bio;
-    struct bio_vec *bvec;
-    int idx;
-    unsigned long mfn, pfn;
+    struct bio *bio;
+    struct bio_vec *bvec;
+    int idx;
+    unsigned long mfn, pfn;
 
-    if( bret->operation == BLKIF_OP_READ )
-    {
-        rq_for_each_bio(bio, req) {
-            bio_for_each_segment(bvec, bio, idx) {
-                mfn = page_to_phys(bvec->bv_page)>>PAGE_SHIFT;
-                pfn = machine_to_phys_mapping[mfn];
-                queue_machphys_update(mfn, pfn);
-            }
-        }
-    }
+    if( bret->operation == BLKIF_OP_READ )
+    {
+        rq_for_each_bio(bio, req) {
+            bio_for_each_segment(bvec, bio, idx) {
+                mfn = page_to_phys(bvec->bv_page)>>PAGE_SHIFT;
+                pfn = machine_to_phys_mapping[mfn];
+                queue_machphys_update(mfn, pfn);
+            }
+        }
+    }
 #endif
 }
 
 
 static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
 {
-    struct request *req;
-    blkif_response_t *bret;
-    BLKIF_RING_IDX i;
-    unsigned long flags;
+    struct request *req;
+    blkif_response_t *bret;
+    BLKIF_RING_IDX i;
+    unsigned long flags;
 
-    spin_lock_irqsave(&blkif_io_lock, flags);
+    spin_lock_irqsave(&blkif_io_lock, flags);
 
-    if (unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery)) {
-        printk("Bailed out\n");
+    if (unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery)) {
+        printk("Bailed out\n");
 
-        spin_unlock_irqrestore(&blkif_io_lock, flags);
-        return IRQ_HANDLED;
-    }
+        spin_unlock_irqrestore(&blkif_io_lock, flags);
+        return IRQ_HANDLED;
+    }
 
-    for (i = resp_cons; i != blk_ring->resp_prod; i++) {
-        bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
-        switch (bret->operation) {
-        case BLKIF_OP_READ:
-        case BLKIF_OP_WRITE:
-            if (unlikely(bret->status != BLKIF_RSP_OKAY))
-                DPRINTK("Bad return from blkdev data request: %lx\n",
-                        bret->status);
-            req = (struct request *)bret->id;
-            /* XXXcl pass up status */
-            if (unlikely(end_that_request_first(req, 1,
-                                                req->hard_nr_sectors)))
-                BUG();
+    for (i = resp_cons; i != blk_ring->resp_prod; i++) {
+        bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
+        switch (bret->operation) {
+        case BLKIF_OP_READ:
+        case BLKIF_OP_WRITE:
+            if (unlikely(bret->status != BLKIF_RSP_OKAY))
+                DPRINTK("Bad return from blkdev data request: %lx\n",
+                        bret->status);
+            req = (struct request *)bret->id;
+            /* XXXcl pass up status */
+            if (unlikely(end_that_request_first(req, 1,
+                                                req->hard_nr_sectors)))
+                BUG();
 
-            end_that_request_last(req);
-            blkif_completion( bret, req );
-            break;
-        case BLKIF_OP_PROBE:
-            memcpy(&blkif_control_rsp, bret, sizeof(*bret));
-            blkif_control_rsp_valid = 1;
-            break;
-        default:
-            BUG();
-        }
-    }
+            end_that_request_last(req);
+            blkif_completion( bret, req );
+            break;
+        case BLKIF_OP_PROBE:
+            memcpy(&blkif_control_rsp, bret, sizeof(*bret));
+            blkif_control_rsp_valid = 1;
+            break;
+        default:
+            BUG();
+        }
+    }
 
-    resp_cons = i;
-    resp_cons_rec = i;
+    resp_cons = i;
+    resp_cons_rec = i;
 
-    if (xlbd_blk_queue &&
-        test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags)) {
-        blk_start_queue(xlbd_blk_queue);
-        /* XXXcl call to request_fn should not be needed but
-         * we get stuck without... needs investigating
-         */
-        xlbd_blk_queue->request_fn(xlbd_blk_queue);
-    }
+    if (xlbd_blk_queue &&
+        test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags)) {
+        blk_start_queue(xlbd_blk_queue);
+        /* XXXcl call to request_fn should not be needed but
+         * we get stuck without... needs investigating
+         */
+        xlbd_blk_queue->request_fn(xlbd_blk_queue);
+    }
 
-    spin_unlock_irqrestore(&blkif_io_lock, flags);
+    spin_unlock_irqrestore(&blkif_io_lock, flags);
 
-    return IRQ_HANDLED;
+    return IRQ_HANDLED;
 }
 
 
@@ -436,8 +438,9 @@ void blkif_control_send(blkif_request_t
     }
 
     blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req = *req;
-    translate_req_to_pfn(&blk_ring_rec->ring[
-        MASK_BLKIF_IDX(blk_ring_rec->req_prod++)].req,req);
+    translate_req_to_pfn(
+        &blk_ring_rec->ring[MASK_BLKIF_IDX(blk_ring_rec->req_prod++)].req,
+        req);
 
     req_prod++;
     flush_requests();
@@ -528,27 +531,29 @@ static void blkif_status_change(blkif_fe
 {
     int i,j;
 
-    /* Shouldn't need the io_request_lock here - the device is
-     * plugged and the recovery flag prevents the interrupt handler
-     * changing anything. */
+    /*
+     * Shouldn't need the io_request_lock here - the device is plugged
+     * and the recovery flag prevents the interrupt handler changing
+     * anything.
+     */
 
     /* Reissue requests from the private block ring. */
     for ( i = 0;
-          resp_cons_rec < blk_ring_rec->req_prod;
+          resp_cons_rec < blk_ring_rec->req_prod;
          resp_cons_rec++, i++ )
    {
-        translate_req_to_mfn(&blk_ring->ring[i].req,
-                             &blk_ring_rec->ring[
-                                 MASK_BLKIF_IDX(resp_cons_rec)].req);
+        translate_req_to_mfn(
+            &blk_ring->ring[i].req,
+            &blk_ring_rec->ring[MASK_BLKIF_IDX(resp_cons_rec)].req);
    }
 
-    /* Reset the private block ring to match the new ring. */
-    for( j=0; j<i; j++ )
-    {
-        translate_req_to_pfn(
-            &blk_ring_rec->ring[j].req,
-            &blk_ring->ring[j].req);
-    }
+    /* Reset the private block ring to match the new ring. */
+    for( j = 0; j < i; j++ )
+    {
+        translate_req_to_pfn(
+            &blk_ring_rec->ring[j].req,
+            &blk_ring->ring[j].req);
+    }
 
    resp_cons_rec = 0;
 
@@ -623,7 +628,7 @@ int __init xlblk_init(void)
    blkif_fe_driver_status_changed_t st;
 
    if ( (start_info.flags & SIF_INITDOMAIN)
-        || (start_info.flags & SIF_BLK_BE_DOMAIN) )
+         || (start_info.flags & SIF_BLK_BE_DOMAIN) )
        return 0;
 
    printk(KERN_INFO "Initialising Xen virtual block device\n");
@@ -654,62 +659,13 @@ int __init xlblk_init(void)
    }
 
    return 0;
-#if 0
-    int error;
-
-    reset_xlblk_interface();
-
-    xlblk_response_irq = bind_virq_to_irq(VIRQ_BLKDEV);
-    xlblk_update_irq = bind_virq_to_irq(VIRQ_VBD_UPD);
-
-    error = request_irq(xlblk_response_irq, xlblk_response_int,
-                        SA_SAMPLE_RANDOM, "blkdev", NULL);
-    if (error) {
-        printk(KERN_ALERT "Could not allocate receive interrupt\n");
-        goto fail;
-    }
-
-    error = request_irq(xlblk_update_irq, xlblk_update_int,
-                        0, "blkdev", NULL);
-    if (error) {
-        printk(KERN_ALERT
-               "Could not allocate block update interrupt\n");
-        goto fail;
-    }
-
-    (void)xlvbd_init();
-
-    return 0;
-
- fail:
-    return error;
-#endif
 }
-
-
-static void __exit xlblk_cleanup(void)
-{
-    /* XXX FIXME */
-    BUG();
-#if 0
-    /* xlvbd_cleanup(); */
-    free_irq(xlblk_response_irq, NULL);
-    free_irq(xlblk_update_irq, NULL);
-    unbind_virq_from_irq(VIRQ_BLKDEV);
-    unbind_virq_from_irq(VIRQ_VBD_UPD);
-#endif
-}
-
-
-module_init(xlblk_init);
-module_exit(xlblk_cleanup);
-
+__initcall(xlblk_init);
 
 void blkdev_suspend(void)
 {
 }
 
-
 void blkdev_resume(void)
 {
    ctrl_msg_t cmsg;
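The BLKIF_RING_FULL macro touched above relies on req_prod and resp_cons being free-running counters: the ring is full when their difference reaches BLKIF_RING_SIZE, and a slot is selected by masking the counter (MASK_BLKIF_IDX). The snippet below is a small standalone sketch of that index arithmetic under assumed names (RING_SIZE, MASK_IDX, RING_FULL); the real macro additionally treats a disconnected interface as "full", which is omitted here.

/*
 * Standalone sketch of free-running producer/consumer ring indices.
 * The ring size and macro names are illustrative assumptions, not the
 * real Xen block-interface headers.
 */
#include <stdio.h>

#define RING_SIZE 64                       /* must be a power of two */
#define MASK_IDX(i) ((i) & (RING_SIZE - 1))
#define RING_FULL(prod, cons) (((prod) - (cons)) == RING_SIZE)

int main(void)
{
    unsigned int req_prod = 0, resp_cons = 0;

    /* Producer queues requests until the ring is full. */
    while (!RING_FULL(req_prod, resp_cons)) {
        unsigned int slot = MASK_IDX(req_prod);
        req_prod++;
        (void)slot;                        /* a request would be written here */
    }
    printf("queued %u requests, ring full\n", req_prod - resp_cons);

    /* Consumer retires a few responses; the indices keep counting up and
     * wrap naturally through unsigned arithmetic. */
    resp_cons += 3;
    printf("ring full now? %s\n",
           RING_FULL(req_prod, resp_cons) ? "yes" : "no");
    return 0;
}

Keeping the indices free-running is what lets resp_cons_rec simply be compared against blk_ring_rec->req_prod in the recovery loop above, without any explicit wrap handling.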
--- a/xen/arch/x86/memory.c   Thu Aug 12 14:44:06 2004 +0000
+++ b/xen/arch/x86/memory.c   Thu Aug 12 14:52:11 2004 +0000
@@ -1401,8 +1401,7 @@ void ptwr_flush_inactive(void)
 
 int ptwr_do_page_fault(unsigned long addr)
 {
-    /* write page fault, check if we're trying to modify an l1
-       page table */
+    /* write page fault, check if we're trying to modify an l1 page table */
     unsigned long pte, pfn;
     struct pfn_info *page;
     l2_pgentry_t *pl2e;
@@ -1412,19 +1411,21 @@ int ptwr_do_page_fault(unsigned long add
     PTWR_PRINTK(("get user %p for va %08lx\n",
                  &linear_pg_table[addr>>PAGE_SHIFT], addr));
 #endif
-    if (l2_pgentry_val(linear_l2_table[addr >> L2_PAGETABLE_SHIFT]) &
-        _PAGE_PRESENT &&
-        __get_user(pte, (unsigned long *)
-                   &linear_pg_table[addr >> PAGE_SHIFT]) == 0) {
+    if ( (l2_pgentry_val(linear_l2_table[addr >> L2_PAGETABLE_SHIFT]) &
+          _PAGE_PRESENT) &&
+         (__get_user(pte, (unsigned long *)
+                     &linear_pg_table[addr >> PAGE_SHIFT]) == 0) )
+    {
         pfn = pte >> PAGE_SHIFT;
 #if 0
         PTWR_PRINTK(("check pte %08lx = pfn %08lx for va %08lx\n", pte, pfn,
                      addr));
 #endif
         page = &frame_table[pfn];
-        if ((page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table) {
+        if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table )
+        {
 #ifdef PTWR_TRACK_DOMAIN
-            if (ptwr_info[cpu].domain != get_current()->domain)
+            if ( ptwr_info[cpu].domain != get_current()->domain )
                 printk("ptwr_do_page_fault domain mismatch %d != %d\n",
                        ptwr_info[cpu].domain, get_current()->domain);
 #endif
@@ -1434,7 +1435,9 @@ int ptwr_do_page_fault(unsigned long add
                          "pfn %08lx\n", addr,
                          ((page->u.inuse.type_info & PGT_va_mask) >>
                           PGT_va_shift) << L2_PAGETABLE_SHIFT, pfn));
-            if (l2_pgentry_val(*pl2e) >> PAGE_SHIFT != pfn) {
+
+            if ( l2_pgentry_val(*pl2e) >> PAGE_SHIFT != pfn )
+            {
                 l1_pgentry_t *pl1e;
                 PTWR_PRINTK(("[I] freeing l1 page %p taf %08x/%08x\n", page,
                              page->u.inuse.type_info,
@@ -1450,10 +1453,12 @@ int ptwr_do_page_fault(unsigned long add
                 unmap_domain_mem(pl1e);
 
                 ptwr_info[cpu].writable_idx++;
-            } else {
+            }
+            else
+            {
                 l2_pgentry_t nl2e;
                 l1_pgentry_t *pl1e;
-                if (ptwr_info[cpu].disconnected != ENTRIES_PER_L2_PAGETABLE)
+                if ( ptwr_info[cpu].disconnected != ENTRIES_PER_L2_PAGETABLE )
                     ptwr_reconnect_disconnected(addr);
                 PTWR_PRINTK(("[A] pl2e %p l2e %08lx pfn %08lx "
                              "taf %08x/%08x/%u\n", pl2e, l2_pgentry_val(*pl2e),
@@ -1481,12 +1486,13 @@ int ptwr_do_page_fault(unsigned long add
                        ENTRIES_PER_L1_PAGETABLE * sizeof(l1_pgentry_t));
                 unmap_domain_mem(pl1e);
             }
+
             /* make pt page writable */
             pte |= _PAGE_RW;
             PTWR_PRINTK(("update %p pte to %08lx\n",
                          &linear_pg_table[addr>>PAGE_SHIFT], pte));
-            if (__put_user(pte, (unsigned long *)
-                           &linear_pg_table[addr>>PAGE_SHIFT]))
+            if ( __put_user(pte, (unsigned long *)
+                            &linear_pg_table[addr>>PAGE_SHIFT]) )
                 BUG();
             return 1;
         }
@@ -1497,20 +1503,20 @@ int ptwr_do_page_fault(unsigned long add
 void ptwr_init_backpointers(void)
 {
     struct pfn_info *page;
-    unsigned long pde, pfn;
+    unsigned long pde;
     int va_idx;
 
-    for (va_idx = 0; va_idx < DOMAIN_ENTRIES_PER_L2_PAGETABLE; va_idx++) {
+    for ( va_idx = 0; va_idx < DOMAIN_ENTRIES_PER_L2_PAGETABLE; va_idx++ )
+    {
        /* check if entry valid */
        pde = l2_pgentry_val(linear_l2_table[va_idx]);
-        if ((pde & _PAGE_PRESENT) == 0)
+        if ( (pde & _PAGE_PRESENT) == 0 )
            continue;
-        pfn = pde >> PAGE_SHIFT;
-        page = &frame_table[pfn];
+
+        page = &frame_table[pde >> PAGE_SHIFT];
        /* assert that page is an l1_page_table XXXcl maybe l2? */
-        if ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) {
+        if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
            BUG();
-        }
        page->u.inuse.type_info &= ~PGT_va_mask;
        page->u.inuse.type_info |= va_idx << PGT_va_shift;
    }
@@ -1525,11 +1531,14 @@ void ptwr_status(void)
    l2_pgentry_t *pl2e;
    int cpu = smp_processor_id();
 
-    for (i = 0; i < ptwr_info[cpu].writable_idx; i++) {
+    for ( i = 0; i < ptwr_info[cpu].writable_idx; i++ )
+    {
        unsigned long *writable_pte = (unsigned long *)&linear_pg_table
            [ptwr_info[cpu].writables[i]>>PAGE_SHIFT];
-        if (__get_user(pte, writable_pte))
+
+        if ( __get_user(pte, writable_pte) )
            BUG();
+
        pfn = pte >> PAGE_SHIFT;
        page = &frame_table[pfn];
        printk("need to alloc l1 page %p\n", page);
@@ -1538,14 +1547,14 @@ void ptwr_status(void)
               writable_pte, pte);
    }
 
-    if (ptwr_info[cpu].disconnected == ENTRIES_PER_L2_PAGETABLE)
+    if ( ptwr_info[cpu].disconnected == ENTRIES_PER_L2_PAGETABLE )
        return;
 
    printk("disconnected space: space %08lx\n",
           ptwr_info[cpu].disconnected << L2_PAGETABLE_SHIFT);
    pl2e = &linear_l2_table[ptwr_info[cpu].disconnected];
 
-    if (__get_user(pte, ptwr_info[cpu].writable_l1))
+    if ( __get_user(pte, (unsigned long *)ptwr_info[cpu].writable_l1) )
        BUG();
    pfn = pte >> PAGE_SHIFT;
    page = &frame_table[pfn];
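The ptwr_init_backpointers() change keeps the same bit-packing idea: page->u.inuse.type_info carries a page-type field (PGT_type_mask) plus a virtual-address slot recorded at PGT_va_shift, and the back-pointer update is a mask-and-or on that word. The standalone sketch below illustrates the pattern; the PGT_* constant values and field widths are invented for the example and are not Xen's real definitions.

/*
 * Standalone sketch of packing a page-type field and a "va index" field
 * into one type_info word.  Field positions are illustrative assumptions.
 */
#include <stdio.h>

#define PGT_type_mask     0xe0000000u          /* top bits: page type */
#define PGT_l1_page_table 0x20000000u
#define PGT_va_shift      23
#define PGT_va_mask       (0x3fu << PGT_va_shift)  /* which L2 slot maps it */

int main(void)
{
    unsigned int type_info = PGT_l1_page_table | (7u << PGT_va_shift);

    /* Check the type, then repoint the back-pointer to L2 slot 12 --
     * the same mask-and-or pattern used in the patch. */
    if ((type_info & PGT_type_mask) != PGT_l1_page_table)
        return 1;

    type_info &= ~PGT_va_mask;
    type_info |= 12u << PGT_va_shift;

    printf("type_info = %#x, va index = %u\n",
           type_info, (type_info & PGT_va_mask) >> PGT_va_shift);
    return 0;
}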