ia64/xen-unstable
changeset 862:682f15023c2b
bitkeeper revision 1.513.1.1 (3f9fea6boX1jvB2gmthgt7V7gZTbkA)
Many files:
no more segments - initial renaming
xl_vbd.c:
Rename: xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_segment.c -> xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_vbd.c
.del-xl_segment_proc.c~3ed615f9a000995e:
Delete: xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_segment_proc.c
.del-xen_physdisk.c~6fc678012cc92172:
Delete: xen/drivers/block/xen_physdisk.c
.del-block.h~6f11045f32e11b92:
Delete: xen/include/xeno/block.h
.del-segment.h~ae27bd5cb9ab97ea:
Delete: xen/include/xeno/segment.h
xen_vbd.c:
Rename: xen/drivers/block/xen_segment.c -> xen/drivers/block/xen_vbd.c
vbd.h:
Rename: xen/include/xeno/physdisk.h -> xen/include/xeno/vbd.h
Rename: xen/include/hypervisor-ifs/segment.h -> xen/include/hypervisor-ifs/vbd.h
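
Before the diff itself, it may help to sketch the data structures behind the rename: a VBD ("virtual block device") is a per-domain, ordered list of physical extents. The shape below is inferred from the field accesses in the new xen_vbd.c later in this changeset; the actual definitions live in the renamed headers (xen/include/xeno/vbd.h and hypervisor-ifs/vbd.h), which are not displayed here, so the exact types, field ordering and key size are assumptions.

    /* Illustrative shape only -- see the renamed vbd.h headers for the real thing. */
    #define XEN_VBD_KEYSIZE 10            /* placeholder; actual value not shown in this changeset */

    typedef struct extent {
        unsigned short raw_device;        /* physical device backing this extent            */
        unsigned long  start_sector;      /* first sector of the extent on that device      */
        unsigned long  nr_sectors;        /* extent length, in 512-byte sectors             */
    } extent_t;

    typedef struct vbd {
        int       mode;                   /* XEN_VBD_UNUSED, XEN_VBD_RO or XEN_VBD_RW       */
        int       domain;                 /* owning domain                                   */
        int       vbd_number;             /* per-domain index, bounded by XEN_MAX_VBDS       */
        char      key[XEN_VBD_KEYSIZE];
        int       num_extents;            /* number of entries in 'extents'                  */
        extent_t *extents;                /* kmalloc'd array of physical extents             */
    } vbd_t;
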
line diff
--- a/.rootkeys	Tue Oct 14 10:42:54 2003 +0000
+++ b/.rootkeys	Wed Oct 29 16:27:23 2003 +0000
@@ -256,8 +256,7 @@ 3ddb79beME_0abStePF6fU8XLuQnWw xen/drive
 3ddb79beNQVrdGyoI4njXhgAjD6a4A xen/drivers/block/genhd.c
 3ddb79beyWwLRP_BiM2t1JKgr_plEw xen/drivers/block/ll_rw_blk.c
 3e4a8cb7RhubVgsPwO7cK0pgAN8WCQ xen/drivers/block/xen_block.c
-3f045882spujO81dMl-fYWGiZ8WcPw xen/drivers/block/xen_physdisk.c
-3e5d129asHNyZOjBKTkqs-9AFzxemA xen/drivers/block/xen_segment.c
+3e5d129asHNyZOjBKTkqs-9AFzxemA xen/drivers/block/xen_vbd.c
 3e9c248afxxsnAzIt2na7Ej24yNFzg xen/drivers/cdrom/Makefile
 3e9c248ajUkn2W3n4vgm72Hp2ftZ8A xen/drivers/cdrom/cdrom.c
 3e4a8cb7alzQCDKS7MlioPoHBKYkdQ xen/drivers/char/Makefile
@@ -461,7 +460,7 @@ 3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/inclu
 3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/hypervisor-ifs/hypervisor-if.h
 3ead095dE_VF-QA88rl_5cWYRWtRVQ xen/include/hypervisor-ifs/kbd.h
 3ddb79c2oRPrzClk3zbTkRHlpumzKA xen/include/hypervisor-ifs/network.h
-3f0d22cbroqp_BkoDPwkfRJhaw1LiQ xen/include/hypervisor-ifs/segment.h
+3f0d22cbroqp_BkoDPwkfRJhaw1LiQ xen/include/hypervisor-ifs/vbd.h
 3ddb79c4qbCoOFHrv9sCGshbWzBVlQ xen/include/scsi/scsi.h
 3ddb79c4R4iVwqIIeychVQYmIH4FUg xen/include/scsi/scsi_ioctl.h
 3ddb79c4yw_mfd4Uikn3v_IOPRpa1Q xen/include/scsi/scsicam.h
@@ -471,7 +470,6 @@ 3e397e66m2tO3s-J8Jnr7Ws_tGoPTg xen/inclu
 3ddb79c0nTsjSpVK4ZVTI9WwN24xtQ xen/include/xeno/blk.h
 3ddb79c0dVhTHLsv6CPTf4baKix4mA xen/include/xeno/blkdev.h
 3ddb79c18ePBgitnOs7GiOCFilODVw xen/include/xeno/blkpg.h
-3ddb79c2SisDOHDyTeK5-MV3m7pNbA xen/include/xeno/block.h
 3ddb79c1oOjpQbp68MW7yiUpoi-S-w xen/include/xeno/brlock.h
 3ddb79c1x7Ie3kifu7dQRx8y7HVyvA xen/include/xeno/byteorder/big_endian.h
 3ddb79c1qFXOEX1eD0yXJ_gsGkUt8w xen/include/xeno/byteorder/generic.h
@@ -525,11 +523,9 @@ 3ddb79c2Fg44_PBPVxHSC0gTOMq4Ow xen/inclu
 3ddb79c0MOVXq8qZDQRGb6z64_xAwg xen/include/xeno/pci_ids.h
 3e54c38dlSCVdyVM4PKcrSfzLLxWUQ xen/include/xeno/perfc.h
 3e54c38de9SUSYSAwxDf_DwkpAnQFA xen/include/xeno/perfc_defn.h
-3f055a3dwldYR102YcSuBaxIf9t3Jw xen/include/xeno/physdisk.h
 3ddb79c04nQVR3EYM5L4zxDV_MCo1g xen/include/xeno/prefetch.h
 3e4540ccU1sgCx8seIMGlahmMfv7yQ xen/include/xeno/reboot.h
 3ddb79c0LzqqS0LhAQ50ekgj4oGl7Q xen/include/xeno/sched.h
-3e5d129a2bbGTpVSZFN_mAnbm97ixw xen/include/xeno/segment.h
 3ddb79c0VDeD-Oft5eNfMneTU3D1dQ xen/include/xeno/skbuff.h
 3ddb79c14dXIhP7C2ahnoD08K90G_w xen/include/xeno/slab.h
 3ddb79c09xbS-xxfKxuV3JETIhBzmg xen/include/xeno/smp.h
@@ -542,6 +538,7 @@ 3ddb79c2HFkXuRxi1CriJtSFmY6Ybw xen/inclu
 3ddb79c2_m8lT9jDKse_tePj7zcnNQ xen/include/xeno/timex.h
 3ddb79c2e2C14HkndNEJlYwXaPrF5A xen/include/xeno/tqueue.h
 3ddb79c1-kVvF8cVa0k3ZHDdBMj01Q xen/include/xeno/types.h
+3f055a3dwldYR102YcSuBaxIf9t3Jw xen/include/xeno/vbd.h
 3e8827bdaqPeZAWGVOwswgY9bWSx4g xen/include/xeno/version.h
 3ddb79c2Ae5KpzhC9LCYG7mP_Vi4Aw xen/include/xeno/vif.h
 3ddb79c4YQCQ6r0xNLLu0jfbM7pVmA xen/net/Makefile
@@ -572,8 +569,7 @@ 3e5a4e65pP5spJErBW69pJxSSdK9RA xenolinux
 3e67f822FOPwqHiaRKbrskgWgoNL5g xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.h
 3e677190SjkzJIvFifRVeYpIZOCtYA xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_ide.c
 3e677193nOKKTLJzcAu4SYdbZaia8g xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_scsi.c
-3e676eb5RXnHzSHgA1BvM0B1aIm4qg xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_segment.c
-3e5d129aDldt6geU2-2SzBae34sQzg xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_segment_proc.c
+3e676eb5RXnHzSHgA1BvM0B1aIm4qg xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_vbd.c
 3e5a4e65G3e2s0ghPMgiJ-gBTUJ0uQ xenolinux-2.4.22-sparse/arch/xeno/drivers/console/Makefile
 3e5a4e651TH-SXHoufurnWjgl5bfOA xenolinux-2.4.22-sparse/arch/xeno/drivers/console/console.c
 3e5a4e656nfFISThfbyXQOA6HN6YHw xenolinux-2.4.22-sparse/arch/xeno/drivers/dom0/Makefile
--- a/tools/internal/dom0_defs.h	Tue Oct 14 10:42:54 2003 +0000
+++ b/tools/internal/dom0_defs.h	Wed Oct 29 16:27:23 2003 +0000
@@ -27,6 +27,7 @@ typedef signed long long s64;
 #include <asm-xeno/proc_cmd.h>
 #include <hypervisor-ifs/hypervisor-if.h>
 #include <hypervisor-ifs/dom0_ops.h>
+#include <hypervisor-ifs/vbd.h>
 
 #define ERROR(_m) \
     fprintf(stderr, "ERROR: %s\n", (_m))
--- a/xen/common/domain.c	Tue Oct 14 10:42:54 2003 +0000
+++ b/xen/common/domain.c	Wed Oct 29 16:27:23 2003 +0000
@@ -16,8 +16,7 @@
 #include <asm/msr.h>
 #include <xeno/blkdev.h>
 #include <xeno/console.h>
-#include <hypervisor-ifs/block.h>
-#include <xeno/physdisk.h>
+#include <xeno/vbd.h>
 
 /*
  * NB. No ring-3 access in initial guestOS pagetables. Note that we allow
@@ -144,8 +143,8 @@ void __kill_domain(struct task_struct *p
 
     unlink_blkdev_info(p);
 
-    for ( i = 0; i < XEN_MAX_SEGMENTS; i++ )
-        xen_segment_delete(p, i);
+    for ( i = 0; i < XEN_MAX_VBDS; i++ )
+        xen_vbd_delete(p, i);
 
     for ( i = 0; i < MAX_DOMAIN_VIFS; i++ )
         unlink_net_vif(p->net_vif_list[i]);
4.1 --- a/xen/drivers/block/xen_block.c Tue Oct 14 10:42:54 2003 +0000 4.2 +++ b/xen/drivers/block/xen_block.c Wed Oct 29 16:27:23 2003 +0000 4.3 @@ -13,12 +13,12 @@ 4.4 #include <hypervisor-ifs/block.h> 4.5 #include <hypervisor-ifs/hypervisor-if.h> 4.6 #include <asm-i386/io.h> 4.7 +#include <asm/domain_page.h> 4.8 #include <xeno/spinlock.h> 4.9 #include <xeno/keyhandler.h> 4.10 #include <xeno/interrupt.h> 4.11 -#include <xeno/segment.h> 4.12 +#include <xeno/vbd.h> 4.13 #include <xeno/slab.h> 4.14 -#include <xeno/physdisk.h> 4.15 4.16 #if 0 4.17 #define DPRINTK(_f, _a...) printk( _f , ## _a ) 4.18 @@ -100,12 +100,10 @@ static void unlock_buffer(struct task_st 4.19 static void io_schedule(unsigned long unused); 4.20 static int do_block_io_op_domain(struct task_struct *p, int max_to_do); 4.21 static void dispatch_rw_block_io(struct task_struct *p, int index); 4.22 -static void dispatch_probe_blk(struct task_struct *p, int index); 4.23 -static void dispatch_probe_seg(struct task_struct *p, int index); 4.24 -static void dispatch_probe_seg_all(struct task_struct *p, int index); 4.25 +static void dispatch_probe(struct task_struct *p, int index); 4.26 static void dispatch_debug_block_io(struct task_struct *p, int index); 4.27 -static void dispatch_create_segment(struct task_struct *p, int index); 4.28 -static void dispatch_delete_segment(struct task_struct *p, int index); 4.29 +static void dispatch_create_vbd(struct task_struct *p, int index); 4.30 +static void dispatch_delete_vbd(struct task_struct *p, int index); 4.31 static void dispatch_grant_physdev(struct task_struct *p, int index); 4.32 static void dispatch_probe_physdev(struct task_struct *p, int index); 4.33 static void make_response(struct task_struct *p, unsigned long id, 4.34 @@ -236,16 +234,43 @@ static void end_block_io_op(struct buffe 4.35 } 4.36 4.37 4.38 +long vbd_attach(vbd_attach_t *info) 4.39 +{ 4.40 + printk("vbd_attach called!!!\n"); 4.41 + return -ENOSYS; 4.42 +} 4.43 4.44 -/****************************************************************** 4.45 - * GUEST-OS SYSCALL -- Indicates there are requests outstanding. 
4.46 - */ 4.47 +/* ----[ Syscall Interface ]------------------------------------------------*/ 4.48 + 4.49 +long do_block_io_op(block_io_op_t *u_block_io_op) 4.50 +{ 4.51 + long ret = 0; 4.52 + block_io_op_t op; 4.53 + 4.54 + if (copy_from_user(&op, u_block_io_op, sizeof(op))) 4.55 + return -EFAULT; 4.56 + 4.57 + switch (op.cmd) { 4.58 4.59 -long do_block_io_op(void) 4.60 -{ 4.61 - add_to_blkdev_list_tail(current); 4.62 - maybe_trigger_io_schedule(); 4.63 - return 0L; 4.64 + case BLOCK_IO_OP_SIGNAL: 4.65 + /* simply indicates there're reqs outstanding => add current to list */ 4.66 + add_to_blkdev_list_tail(current); 4.67 + maybe_trigger_io_schedule(); 4.68 + break; 4.69 + 4.70 + case BLOCK_IO_OP_ATTACH_VBD: 4.71 + /* attach a VBD to a given domain; caller must be privileged */ 4.72 + if(!IS_PRIV(current)) 4.73 + return -EPERM; 4.74 + ret = vbd_attach(&op.u.attach_info); 4.75 + break; 4.76 + 4.77 + default: 4.78 + ret = -ENOSYS; 4.79 + } 4.80 + 4.81 + 4.82 + return ret; 4.83 } 4.84 4.85 4.86 @@ -375,28 +400,20 @@ static int do_block_io_op_domain(struct 4.87 dispatch_rw_block_io(p, i); 4.88 break; 4.89 4.90 - case XEN_BLOCK_PROBE_BLK: 4.91 - dispatch_probe_blk(p, i); 4.92 - break; 4.93 - 4.94 - case XEN_BLOCK_PROBE_SEG: 4.95 - dispatch_probe_seg(p, i); 4.96 - break; 4.97 - 4.98 - case XEN_BLOCK_PROBE_SEG_ALL: 4.99 - dispatch_probe_seg_all(p, i); 4.100 + case XEN_BLOCK_PROBE: 4.101 + dispatch_probe(p, i); 4.102 break; 4.103 4.104 case XEN_BLOCK_DEBUG: 4.105 dispatch_debug_block_io(p, i); 4.106 break; 4.107 4.108 - case XEN_BLOCK_SEG_CREATE: 4.109 - dispatch_create_segment(p, i); 4.110 + case XEN_BLOCK_VBD_CREATE: 4.111 + dispatch_create_vbd(p, i); 4.112 break; 4.113 4.114 - case XEN_BLOCK_SEG_DELETE: 4.115 - dispatch_delete_segment(p, i); 4.116 + case XEN_BLOCK_VBD_DELETE: 4.117 + dispatch_delete_vbd(p, i); 4.118 break; 4.119 4.120 case XEN_BLOCK_PHYSDEV_GRANT: 4.121 @@ -491,7 +508,7 @@ static void dispatch_grant_physdev(struc 4.122 XEN_BLOCK_PHYSDEV_GRANT, result); 4.123 } 4.124 4.125 -static void dispatch_create_segment(struct task_struct *p, int index) 4.126 +static void dispatch_create_vbd(struct task_struct *p, int index) 4.127 { 4.128 blk_ring_t *blk_ring = p->blk_ring_base; 4.129 unsigned long flags, buffer; 4.130 @@ -500,7 +517,7 @@ static void dispatch_create_segment(stru 4.131 4.132 if ( p->domain != 0 ) 4.133 { 4.134 - DPRINTK("dispatch_create_segment called by dom%d\n", p->domain); 4.135 + DPRINTK("dispatch_create_vbd called by dom%d\n", p->domain); 4.136 result = 1; 4.137 goto out; 4.138 } 4.139 @@ -510,7 +527,7 @@ static void dispatch_create_segment(stru 4.140 spin_lock_irqsave(&p->page_lock, flags); 4.141 if ( !__buffer_is_valid(p, buffer, sizeof(xv_disk_t), 1) ) 4.142 { 4.143 - DPRINTK("Bad buffer in dispatch_create_segment\n"); 4.144 + DPRINTK("Bad buffer in dispatch_create_vbd\n"); 4.145 spin_unlock_irqrestore(&p->page_lock, flags); 4.146 result = 1; 4.147 goto out; 4.148 @@ -519,24 +536,25 @@ static void dispatch_create_segment(stru 4.149 spin_unlock_irqrestore(&p->page_lock, flags); 4.150 4.151 xvd = phys_to_virt(buffer); 4.152 - result = xen_segment_create(xvd); 4.153 + result = xen_vbd_create(xvd); 4.154 4.155 unlock_buffer(p, buffer, sizeof(xv_disk_t), 1); 4.156 4.157 out: 4.158 make_response(p, blk_ring->ring[index].req.id, 4.159 - XEN_BLOCK_SEG_CREATE, result); 4.160 + XEN_BLOCK_VBD_CREATE, result); 4.161 } 4.162 4.163 -static void dispatch_delete_segment(struct task_struct *p, int index) 4.164 +static void dispatch_delete_vbd(struct task_struct *p, int index) 4.165 
{ 4.166 - DPRINTK("dispatch_delete_segment: unimplemented\n"); 4.167 + DPRINTK("dispatch_delete_vbd: unimplemented\n"); 4.168 } 4.169 4.170 -static void dispatch_probe_blk(struct task_struct *p, int index) 4.171 +static void dispatch_probe(struct task_struct *p, int index) 4.172 { 4.173 extern void ide_probe_devices(xen_disk_info_t *xdi); 4.174 extern void scsi_probe_devices(xen_disk_info_t *xdi); 4.175 + extern void vbd_probe_devices(xen_disk_info_t *xdi, struct task_struct *p); 4.176 4.177 blk_ring_t *blk_ring = p->blk_ring_base; 4.178 xen_disk_info_t *xdi; 4.179 @@ -553,80 +571,30 @@ static void dispatch_probe_blk(struct ta 4.180 rc = 1; 4.181 goto out; 4.182 } 4.183 + 4.184 __lock_buffer(buffer, sizeof(xen_disk_info_t), 1); 4.185 spin_unlock_irqrestore(&p->page_lock, flags); 4.186 4.187 - xdi = phys_to_virt(buffer); 4.188 - ide_probe_devices(xdi); 4.189 - scsi_probe_devices(xdi); 4.190 + /* 4.191 + ** XXX SMH: all three of the below probe functions /append/ their 4.192 + ** info to the xdi array; i.e. they assume that all earlier slots 4.193 + ** are correctly filled, and that xdi->count points to the first 4.194 + ** free entry in the array. All kinda gross but it'll do for now. 4.195 + */ 4.196 + xdi = map_domain_mem(buffer); 4.197 + xdi->count = 0; 4.198 + if(IS_PRIV(p)) { 4.199 + /* privilege domains always gets access to the 'real' devices */ 4.200 + ide_probe_devices(xdi); 4.201 + scsi_probe_devices(xdi); 4.202 + } 4.203 + vbd_probe_devices(xdi, p); 4.204 + unmap_domain_mem(xdi); 4.205 4.206 unlock_buffer(p, buffer, sizeof(xen_disk_info_t), 1); 4.207 4.208 out: 4.209 - make_response(p, blk_ring->ring[index].req.id, XEN_BLOCK_PROBE_BLK, rc); 4.210 -} 4.211 - 4.212 -static void dispatch_probe_seg(struct task_struct *p, 4.213 - int index) 4.214 -{ 4.215 - extern void xen_segment_probe(struct task_struct *, xen_disk_info_t *); 4.216 - 4.217 - blk_ring_t *blk_ring = p->blk_ring_base; 4.218 - xen_disk_info_t *xdi; 4.219 - unsigned long flags, buffer; 4.220 - int rc = 0; 4.221 - 4.222 - buffer = blk_ring->ring[index].req.buffer_and_sects[0] & ~0x1FF; 4.223 - 4.224 - spin_lock_irqsave(&p->page_lock, flags); 4.225 - if ( !__buffer_is_valid(p, buffer, sizeof(xen_disk_info_t), 1) ) 4.226 - { 4.227 - DPRINTK("Bad buffer in dispatch_probe_seg\n"); 4.228 - spin_unlock_irqrestore(&p->page_lock, flags); 4.229 - rc = 1; 4.230 - goto out; 4.231 - } 4.232 - __lock_buffer(buffer, sizeof(xen_disk_info_t), 1); 4.233 - spin_unlock_irqrestore(&p->page_lock, flags); 4.234 - 4.235 - xdi = phys_to_virt(buffer); 4.236 - xen_segment_probe(p, xdi); 4.237 - 4.238 - unlock_buffer(p, buffer, sizeof(xen_disk_info_t), 1); 4.239 - 4.240 - out: 4.241 - make_response(p, blk_ring->ring[index].req.id, XEN_BLOCK_PROBE_SEG, rc); 4.242 -} 4.243 - 4.244 -static void dispatch_probe_seg_all(struct task_struct *p, int index) 4.245 -{ 4.246 - extern void xen_segment_probe_all(xen_segment_info_t *); 4.247 - 4.248 - blk_ring_t *blk_ring = p->blk_ring_base; 4.249 - xen_segment_info_t *xsi; 4.250 - unsigned long flags, buffer; 4.251 - int rc = 0; 4.252 - 4.253 - buffer = blk_ring->ring[index].req.buffer_and_sects[0] & ~0x1FF; 4.254 - 4.255 - spin_lock_irqsave(&p->page_lock, flags); 4.256 - if ( !__buffer_is_valid(p, buffer, sizeof(xen_segment_info_t), 1) ) 4.257 - { 4.258 - DPRINTK("Bad buffer in dispatch_probe_seg_all\n"); 4.259 - spin_unlock_irqrestore(&p->page_lock, flags); 4.260 - rc = 1; 4.261 - goto out; 4.262 - } 4.263 - __lock_buffer(buffer, sizeof(xen_segment_info_t), 1); 4.264 - spin_unlock_irqrestore(&p->page_lock, 
flags); 4.265 - 4.266 - xsi = phys_to_virt(buffer); 4.267 - xen_segment_probe_all(xsi); 4.268 - 4.269 - unlock_buffer(p, buffer, sizeof(xen_segment_info_t), 1); 4.270 - 4.271 - out: 4.272 - make_response(p, blk_ring->ring[index].req.id, XEN_BLOCK_PROBE_SEG_ALL, rc); 4.273 + make_response(p, blk_ring->ring[index].req.id, XEN_BLOCK_PROBE, rc); 4.274 } 4.275 4.276 static void dispatch_rw_block_io(struct task_struct *p, int index) 4.277 @@ -680,14 +648,14 @@ static void dispatch_rw_block_io(struct 4.278 /* Get the physical device and block index. */ 4.279 if ( (req->device & XENDEV_TYPE_MASK) == XENDEV_VIRTUAL ) 4.280 { 4.281 - new_segs = xen_segment_map_request( 4.282 + new_segs = xen_vbd_map_request( 4.283 &phys_seg[nr_psegs], p, operation, 4.284 req->device, 4.285 req->sector_number + tot_sects, 4.286 buffer, nr_sects); 4.287 if ( new_segs <= 0 ) 4.288 { 4.289 - DPRINTK("bogus xen_segment_map_request\n"); 4.290 + DPRINTK("bogus xen_vbd_map_request\n"); 4.291 goto bad_descriptor; 4.292 } 4.293 } 4.294 @@ -861,10 +829,10 @@ void init_blkdev_info(struct task_struct 4.295 SHARE_PFN_WITH_DOMAIN(virt_to_page(p->blk_ring_base), p->domain); 4.296 p->blkdev_list.next = NULL; 4.297 4.298 - memset(p->segment_list, 0, sizeof(p->segment_list)); 4.299 + memset(p->vbd_list, 0, sizeof(p->vbd_list)); 4.300 4.301 /* Get any previously created segments. */ 4.302 - xen_refresh_segment_list(p); 4.303 + xen_refresh_vbd_list(p); 4.304 } 4.305 4.306 /* End-of-day teardown for a domain. */ 4.307 @@ -905,7 +873,7 @@ void initialize_block_io () 4.308 "buffer_head_cache", sizeof(struct buffer_head), 4.309 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 4.310 4.311 - xen_segment_initialize(); 4.312 + xen_vbd_initialize(); 4.313 4.314 add_key_handler('b', dump_blockq, "dump xen ide blkdev statistics"); 4.315 }
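
The XXX SMH comment in the new dispatch_probe() above spells out the contract on the probe path: ide_probe_devices(), scsi_probe_devices() and vbd_probe_devices() each append their results to the xen_disk_info_t array, trusting xdi->count to index the first free slot. A standalone toy model of that append convention follows; the struct here is illustrative and is not the real xen_disk_info_t layout (only the device, capacity and count fields are taken from the diff).

    #include <stdio.h>

    #define MAX_DISKS 8

    /* Toy stand-in for xen_disk_info_t (illustrative only). */
    struct disk_entry { unsigned long device, capacity; };
    struct disk_info  { int count; struct disk_entry disks[MAX_DISKS]; };

    /* Append one device record; 'count' always indexes the first free slot,
     * exactly as the real probe functions assume of earlier callers. */
    static void append_disk(struct disk_info *xdi,
                            unsigned long device, unsigned long capacity)
    {
        xdi->disks[xdi->count].device   = device;
        xdi->disks[xdi->count].capacity = capacity;
        xdi->count++;
    }

    int main(void)
    {
        struct disk_info xdi = { 0 };
        append_disk(&xdi, 0x301, 1000000);   /* e.g. an entry from an IDE probe */
        append_disk(&xdi, 0x801, 2000000);   /* e.g. an entry from the VBD probe */
        printf("%d devices reported\n", xdi.count);
        return 0;
    }
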
5.1 --- a/xen/drivers/block/xen_physdisk.c Tue Oct 14 10:42:54 2003 +0000 5.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 5.3 @@ -1,277 +0,0 @@ 5.4 -#include <xeno/sched.h> 5.5 -#include <xeno/list.h> 5.6 -#include <xeno/blkdev.h> 5.7 -#include <xeno/sched.h> 5.8 -#include <xeno/slab.h> 5.9 -#include <asm/domain_page.h> 5.10 -#include <asm/io.h> 5.11 -#include <xeno/segment.h> 5.12 -#include <xeno/physdisk.h> 5.13 - 5.14 -#define MAX(a,b) ((a) > (b) ? (a) : (b)) 5.15 - 5.16 -#if 0 5.17 -#define DPRINTK printk 5.18 -#else 5.19 -#define DPRINTK(...) 5.20 -#endif 5.21 - 5.22 -/* The idea is that, for each sector of each disk, each domain has two 5.23 - bits, saying whether they can read the sector or write it. That 5.24 - would take too much memory, so instead each process has a list of 5.25 - (device, start, end, mode) quads which say what it has access to, 5.26 - and we fake the logical view on top of that. */ 5.27 -struct physdisk_ace { 5.28 - struct list_head list; 5.29 - unsigned short device; 5.30 - unsigned short partition; 5.31 - unsigned long start_sect; 5.32 - unsigned long n_sectors; 5.33 - int mode; 5.34 -}; 5.35 - 5.36 -/* Operation is a blkdev constant i.e. READ, WRITE, ... */ 5.37 -/* Must be called with p->physdev_lock held. */ 5.38 -static struct physdisk_ace *find_ace(const struct task_struct *p, 5.39 - unsigned short dev, 5.40 - unsigned long sect, int operation) 5.41 -{ 5.42 - struct list_head *cur_ace_head; 5.43 - struct physdisk_ace *cur_ace; 5.44 - 5.45 - list_for_each(cur_ace_head, &p->physdisk_aces) 5.46 - { 5.47 - cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list); 5.48 - DPRINTK("Is [%lx, %lx) good for %lx?\n", 5.49 - cur_ace->start_sect, 5.50 - cur_ace->start_sect + cur_ace->n_sectors, sect); 5.51 - if ( (sect >= cur_ace->start_sect) && 5.52 - (sect < (cur_ace->start_sect + cur_ace->n_sectors)) && 5.53 - (dev == cur_ace->device) && 5.54 - (((operation == READ) && (cur_ace->mode & PHYSDISK_MODE_R)) || 5.55 - ((operation == WRITE) && (cur_ace->mode & PHYSDISK_MODE_W))) ) 5.56 - return cur_ace; 5.57 - } 5.58 - return NULL; 5.59 -} 5.60 - 5.61 -/* Hold the lock on entry, it remains held on exit. */ 5.62 -static void xen_physdisk_revoke_access(unsigned short dev, 5.63 - unsigned long start_sect, 5.64 - unsigned long n_sectors, 5.65 - struct task_struct *p) 5.66 -{ 5.67 - /* Find every ace which intersects [start_sect, start_sect + 5.68 - n_sectors] and either remove it completely or truncate it 5.69 - down. */ 5.70 - struct list_head *cur_ace_head; 5.71 - struct physdisk_ace *cur_ace, *new_ace; 5.72 - unsigned long kill_zone_end, ace_end; 5.73 - 5.74 - kill_zone_end = start_sect + n_sectors; 5.75 - list_for_each(cur_ace_head, &p->physdisk_aces) 5.76 - { 5.77 - cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list); 5.78 - ace_end = cur_ace->start_sect + cur_ace->n_sectors; 5.79 - if ( (cur_ace->start_sect >= kill_zone_end) || 5.80 - (ace_end <= start_sect) || 5.81 - (cur_ace->device != dev) ) 5.82 - continue; 5.83 - 5.84 - DPRINTK("Killing ace [%lx, %lx) against kill zone [%lx, %lx)\n", 5.85 - cur_ace->start_sect, ace_end, start_sect, kill_zone_end); 5.86 - 5.87 - if ( (cur_ace->start_sect >= start_sect) && 5.88 - (ace_end <= kill_zone_end) ) 5.89 - { 5.90 - /* ace entirely within kill zone -> kill it */ 5.91 - list_del(cur_ace_head); 5.92 - cur_ace_head = cur_ace_head->prev; 5.93 - kfree(cur_ace); 5.94 - } 5.95 - else if ( ace_end <= kill_zone_end ) 5.96 - { 5.97 - /* ace start before kill start, ace end in kill zone, 5.98 - move ace end. 
*/ 5.99 - cur_ace->n_sectors = start_sect - cur_ace->start_sect; 5.100 - } 5.101 - else if ( cur_ace->start_sect >= start_sect ) 5.102 - { 5.103 - /* ace start after kill start, ace end outside kill zone, 5.104 - move ace start. */ 5.105 - cur_ace->start_sect = kill_zone_end; 5.106 - cur_ace->n_sectors = ace_end - cur_ace->start_sect; 5.107 - } 5.108 - else 5.109 - { 5.110 - /* The fun one: the ace entirely includes the kill zone. */ 5.111 - /* Cut the current ace down to just the bit before the kzone, 5.112 - create a new ace for the bit just after it. */ 5.113 - new_ace = kmalloc(sizeof(*cur_ace), GFP_KERNEL); 5.114 - new_ace->device = dev; 5.115 - new_ace->start_sect = kill_zone_end; 5.116 - new_ace->n_sectors = ace_end - kill_zone_end; 5.117 - new_ace->mode = cur_ace->mode; 5.118 - 5.119 - cur_ace->n_sectors = start_sect - cur_ace->start_sect; 5.120 - 5.121 - list_add(&new_ace->list, cur_ace_head); 5.122 - } 5.123 - } 5.124 -} 5.125 - 5.126 -/* Hold the lock on entry, it remains held on exit. */ 5.127 -static int xen_physdisk_grant_access(unsigned short dev, 5.128 - unsigned short partition, 5.129 - unsigned long start_sect, 5.130 - unsigned long n_sectors, 5.131 - int mode, struct task_struct *p) 5.132 -{ 5.133 - struct physdisk_ace *cur_ace; 5.134 - 5.135 - /* Make sure it won't overlap with any existing ACEs. */ 5.136 - /* XXX this isn't quite right if the domain already has read access 5.137 - and we try to grant write access, or vice versa. */ 5.138 - xen_physdisk_revoke_access(dev, start_sect, n_sectors, p); 5.139 - 5.140 - if ( mode ) 5.141 - { 5.142 - cur_ace = kmalloc(sizeof(*cur_ace), GFP_KERNEL); 5.143 - cur_ace->device = dev; 5.144 - cur_ace->start_sect = start_sect; 5.145 - cur_ace->n_sectors = n_sectors; 5.146 - cur_ace->mode = mode; 5.147 - cur_ace->partition = partition; 5.148 - 5.149 - list_add_tail(&cur_ace->list, &p->physdisk_aces); 5.150 - } 5.151 - 5.152 - return 0; 5.153 -} 5.154 - 5.155 -static void xen_physdisk_probe_access(physdisk_probebuf_t * buf, 5.156 - struct task_struct *p) 5.157 -{ 5.158 - int n_aces; 5.159 - struct list_head *cur_ace_head; 5.160 - struct physdisk_ace *cur_ace; 5.161 - int x = 0; 5.162 - 5.163 - n_aces = 0; 5.164 - list_for_each(cur_ace_head, &p->physdisk_aces) 5.165 - { 5.166 - x++; 5.167 - if ( x >= buf->start_ind ) 5.168 - { 5.169 - cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list); 5.170 - buf->entries[n_aces].device = cur_ace->device; 5.171 - buf->entries[n_aces].partition = cur_ace->partition; 5.172 - buf->entries[n_aces].start_sect = cur_ace->start_sect; 5.173 - buf->entries[n_aces].n_sectors = cur_ace->n_sectors; 5.174 - buf->entries[n_aces].mode = cur_ace->mode; 5.175 - n_aces++; 5.176 - } 5.177 - } 5.178 - buf->n_aces = n_aces; 5.179 -} 5.180 - 5.181 -int xen_physdisk_grant(xp_disk_t * xpd_in) 5.182 -{ 5.183 - struct task_struct *p = current; 5.184 - xp_disk_t *xpd = map_domain_mem(virt_to_phys(xpd_in)); 5.185 - int res; 5.186 - 5.187 - p = find_domain_by_id(xpd->domain); 5.188 - if ( p == NULL ) 5.189 - { 5.190 - DPRINTK("Bad domain!\n"); 5.191 - res = 1; 5.192 - goto out; 5.193 - } 5.194 - 5.195 - spin_lock(&p->physdev_lock); 5.196 - res = xen_physdisk_grant_access(xpd->device, 5.197 - xpd->partition, 5.198 - xpd->start_sect, 5.199 - xpd->n_sectors, xpd->mode, p); 5.200 - spin_unlock(&p->physdev_lock); 5.201 - put_task_struct(p); 5.202 - 5.203 - out: 5.204 - unmap_domain_mem(xpd); 5.205 - return res; 5.206 -} 5.207 - 5.208 -int xen_physdisk_probe(struct task_struct *requesting_domain, 5.209 - physdisk_probebuf_t * 
buf_in) 5.210 -{ 5.211 - struct task_struct *p; 5.212 - physdisk_probebuf_t *buf = map_domain_mem(virt_to_phys(buf_in)); 5.213 - int res; 5.214 - 5.215 - if ( (requesting_domain->domain != 0) && 5.216 - (requesting_domain->domain != buf->domain) ) 5.217 - { 5.218 - res = 1; 5.219 - goto out; 5.220 - } 5.221 - 5.222 - p = find_domain_by_id(buf->domain); 5.223 - if ( p == NULL ) 5.224 - { 5.225 - res = 1; 5.226 - goto out; 5.227 - } 5.228 - 5.229 - spin_lock(&p->physdev_lock); 5.230 - xen_physdisk_probe_access(buf, p); 5.231 - spin_unlock(&p->physdev_lock); 5.232 - put_task_struct(p); 5.233 - 5.234 - res = 0; 5.235 - out: 5.236 - unmap_domain_mem(buf); 5.237 - return res; 5.238 -} 5.239 - 5.240 -int xen_physdisk_access_okay(phys_seg_t * pseg, struct task_struct *p, 5.241 - int operation) 5.242 -{ 5.243 - struct physdisk_ace *cur_ace; 5.244 - unsigned long sect; 5.245 - 5.246 - DPRINTK 5.247 - ("Checking access for domain %d, start sect 0x%lx, length 0x%x.\n", 5.248 - p->domain, pseg->sector_number, pseg->nr_sects); 5.249 - 5.250 - for ( sect = pseg->sector_number; 5.251 - sect < pseg->sector_number + pseg->nr_sects; ) 5.252 - { 5.253 - /* XXX this would be a lot faster if the aces were sorted on start 5.254 - address. Also in revoke_access. */ 5.255 - spin_lock(&p->physdev_lock); 5.256 - cur_ace = find_ace(p, pseg->dev, sect, operation); 5.257 - spin_unlock(&p->physdev_lock); 5.258 - if ( cur_ace == NULL ) 5.259 - return 0; 5.260 - sect += 5.261 - MAX(cur_ace->n_sectors, 5.262 - pseg->nr_sects + pseg->sector_number - sect); 5.263 - } 5.264 - return 1; 5.265 -} 5.266 - 5.267 -void destroy_physdisk_aces(struct task_struct *p) 5.268 -{ 5.269 - struct list_head *cur_ace_head, *next_head; 5.270 - struct physdisk_ace *cur_ace; 5.271 - 5.272 - for ( cur_ace_head = p->physdisk_aces.next; 5.273 - cur_ace_head != &p->physdisk_aces; 5.274 - cur_ace_head = next_head ) 5.275 - { 5.276 - cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list); 5.277 - next_head = cur_ace_head->next; 5.278 - kfree(cur_ace); 5.279 - } 5.280 -}
6.1 --- a/xen/drivers/block/xen_segment.c Tue Oct 14 10:42:54 2003 +0000 6.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 6.3 @@ -1,429 +0,0 @@ 6.4 -/* 6.5 - * xen_segment.c 6.6 - */ 6.7 - 6.8 -#include <xeno/config.h> 6.9 -#include <xeno/types.h> 6.10 -#include <xeno/lib.h> 6.11 -#include <asm/io.h> 6.12 -#include <xeno/slab.h> 6.13 -#include <xeno/segment.h> 6.14 -#include <xeno/sched.h> 6.15 -#include <xeno/blkdev.h> 6.16 -#include <xeno/keyhandler.h> 6.17 -#include <asm/current.h> 6.18 -#include <asm/domain_page.h> 6.19 -#include <hypervisor-ifs/block.h> 6.20 - 6.21 -/* Global list of all possible segments. This can be changed in 6.22 - the following way: 6.23 - 6.24 - 1) UNUSED segment -> RO or RW segment. This requires the spinlock. 6.25 - 6.26 - 2) RO or RW -> UNUSED. This requires the lock and can only happen 6.27 - during process teardown. 6.28 - 6.29 - This means that processes can access entries in the list safely 6.30 - without having to hold any lock at all: they already have an entry 6.31 - allocated, and we know that entry can't become unused, as segments 6.32 - are only torn down when the domain is dieing, by which point it 6.33 - can't be accessing them anymore. */ 6.34 -static segment_t xsegments[XEN_MAX_SEGMENTS]; 6.35 -static spinlock_t xsegment_lock = SPIN_LOCK_UNLOCKED; 6.36 - 6.37 -#if 0 6.38 -#define DPRINTK(_f, _a...) printk( _f , ## _a ) 6.39 -#else 6.40 -#define DPRINTK(_f, _a...) ((void)0) 6.41 -#endif 6.42 - 6.43 -/* 6.44 - * xen_segment_map_request 6.45 - * 6.46 - * xen_device must be a valid device. 6.47 - * 6.48 - * NB. All offsets and sizes here are in sector units. 6.49 - * eg. 'size == 1' means an actual size of 512 bytes. 6.50 - * 6.51 - * Note that no locking is performed here whatsoever -- 6.52 - * we rely on the fact that once segment information is 6.53 - * established, it is only modified by domain shutdown, 6.54 - * and so if this is being called, noone is trying 6.55 - * to modify the segment list. 6.56 - */ 6.57 -int xen_segment_map_request( 6.58 - phys_seg_t *pseg, struct task_struct *p, int operation, 6.59 - unsigned short segment_number, 6.60 - unsigned long sect_nr, unsigned long buffer, unsigned short nr_sects) 6.61 -{ 6.62 - segment_t *seg; 6.63 - extent_t *ext; 6.64 - int sum, i; 6.65 - 6.66 - segment_number &= XENDEV_IDX_MASK; 6.67 - if ( segment_number >= XEN_MAX_SEGMENTS ) 6.68 - { 6.69 - DPRINTK("invalid segment number. %d %d\n", 6.70 - segment_number, XEN_MAX_SEGMENTS); 6.71 - goto fail; 6.72 - } 6.73 - 6.74 - seg = p->segment_list[segment_number]; 6.75 - if ( seg == NULL ) 6.76 - { 6.77 - DPRINTK("segment is null. %d\n", segment_number); 6.78 - goto fail; 6.79 - } 6.80 - 6.81 - /* check domain permissions */ 6.82 - if ( seg->domain != p->domain ) 6.83 - { 6.84 - DPRINTK("seg is for another domain. %d %d\n", seg->domain, p->domain); 6.85 - goto fail; 6.86 - } 6.87 - 6.88 - /* check rw access */ 6.89 - if ( ((operation == WRITE) && (seg->mode != XEN_SEGMENT_RW)) || 6.90 - ((operation == READ) && (seg->mode == XEN_SEGMENT_UNUSED)) ) 6.91 - { 6.92 - DPRINTK("illegal operation: %d %d\n", operation, seg->mode); 6.93 - goto fail; 6.94 - } 6.95 - 6.96 - if ( (nr_sects + sect_nr) <= sect_nr ) 6.97 - { 6.98 - DPRINTK("sector + size wrap! 
%08lx %04x\n", sect_nr, nr_sects); 6.99 - goto fail; 6.100 - } 6.101 - 6.102 - /* find extent, check size */ 6.103 - sum = 0; 6.104 - i = 0; 6.105 - ext = seg->extents; 6.106 - while ( (i < seg->num_extents) && ((sum + ext->size) <= sect_nr) ) 6.107 - { 6.108 - sum += ext->size; 6.109 - ext++; i++; 6.110 - } 6.111 - 6.112 - if ( (sum + ext->size) <= sect_nr ) 6.113 - { 6.114 - DPRINTK("extent size mismatch: %d %d : %d %ld %ld\n", 6.115 - i, seg->num_extents, sum, ext->size, sect_nr); 6.116 - goto fail; 6.117 - } 6.118 - 6.119 - pseg->sector_number = (sect_nr - sum) + ext->offset; 6.120 - pseg->buffer = buffer; 6.121 - pseg->nr_sects = nr_sects; 6.122 - pseg->dev = xendev_to_physdev(ext->disk); 6.123 - if ( pseg->dev == 0 ) 6.124 - { 6.125 - DPRINTK ("invalid device 0x%x 0x%lx 0x%lx\n", 6.126 - ext->disk, ext->offset, ext->size); 6.127 - goto fail; 6.128 - } 6.129 - 6.130 - /* We're finished if the virtual extent didn't overrun the phys extent. */ 6.131 - if ( (sum + ext->size) >= (sect_nr + nr_sects) ) 6.132 - return 1; /* entire read fits in this extent */ 6.133 - 6.134 - /* Hmmm... make sure there's another extent to overrun onto! */ 6.135 - if ( (i+1) == seg->num_extents ) 6.136 - { 6.137 - DPRINTK ("not enough extents %d %d\n", 6.138 - i, seg->num_extents); 6.139 - goto fail; 6.140 - } 6.141 - 6.142 - pseg[1].nr_sects = (sect_nr + nr_sects) - (sum + ext->size); 6.143 - pseg[0].nr_sects = sum + ext->size - sect_nr; 6.144 - pseg[1].buffer = buffer + (pseg->nr_sects << 9); 6.145 - pseg[1].sector_number = ext[1].offset; 6.146 - pseg[1].dev = xendev_to_physdev(ext[1].disk); 6.147 - if ( pseg[1].dev == 0 ) 6.148 - { 6.149 - DPRINTK ("bogus device for pseg[1] \n"); 6.150 - goto fail; 6.151 - } 6.152 - 6.153 - /* We don't allow overrun onto a third physical extent. */ 6.154 - if ( pseg[1].nr_sects > ext[1].size ) 6.155 - { 6.156 - DPRINTK ("third extent\n"); 6.157 - DPRINTK (" sum:%d, e0:%ld, e1:%ld p1.sect:%ld p1.nr:%d\n", 6.158 - sum, ext[0].size, ext[1].size, 6.159 - pseg[1].sector_number, pseg[1].nr_sects); 6.160 - goto fail; 6.161 - } 6.162 - 6.163 - return 2; /* We overran onto a second physical extent. 
*/ 6.164 - 6.165 - fail: 6.166 - DPRINTK ("xen_segment_map_request failure\n"); 6.167 - DPRINTK ("operation: %d\n", operation); 6.168 - DPRINTK ("segment number: %d\n", segment_number); 6.169 - DPRINTK ("sect_nr: %ld 0x%lx\n", sect_nr, sect_nr); 6.170 - DPRINTK ("nr_sects: %d 0x%x\n", nr_sects, nr_sects); 6.171 - return -1; 6.172 -} 6.173 - 6.174 -/* 6.175 - * xen_segment_probe 6.176 - * 6.177 - * return a list of segments to the guestos 6.178 - */ 6.179 -void xen_segment_probe(struct task_struct *p, xen_disk_info_t *raw_xdi) 6.180 -{ 6.181 - int loop, i; 6.182 - xen_disk_info_t *xdi = map_domain_mem(virt_to_phys(raw_xdi)); 6.183 - unsigned long capacity = 0, device; 6.184 - 6.185 - spin_lock(&xsegment_lock); 6.186 - xdi->count = 0; 6.187 - for ( loop = 0; loop < XEN_MAX_SEGMENTS; loop++ ) 6.188 - { 6.189 - if ( (xsegments[loop].mode == XEN_SEGMENT_UNUSED) || 6.190 - (xsegments[loop].domain != p->domain) ) 6.191 - continue; 6.192 - 6.193 - device = MK_VIRTUAL_XENDEV(xsegments[loop].segment_number); 6.194 - for ( i = 0; i < xsegments[loop].num_extents; i++ ) 6.195 - capacity += xsegments[loop].extents[i].size; 6.196 - 6.197 - xdi->disks[xdi->count].device = device; 6.198 - xdi->disks[xdi->count].capacity = capacity; 6.199 - xdi->count++; 6.200 - } 6.201 - spin_unlock(&xsegment_lock); 6.202 - 6.203 - unmap_domain_mem(xdi); 6.204 -} 6.205 - 6.206 -/* 6.207 - * xen_segment_probe_all 6.208 - * 6.209 - * return a list of all segments to domain 0 6.210 - */ 6.211 -void xen_segment_probe_all(xen_segment_info_t *raw_xsi) 6.212 -{ 6.213 - int loop; 6.214 - xen_segment_info_t *xsi = map_domain_mem(virt_to_phys(raw_xsi)); 6.215 - 6.216 - spin_lock(&xsegment_lock); 6.217 - xsi->count = 0; 6.218 - for ( loop = 0; loop < XEN_MAX_SEGMENTS; loop++ ) 6.219 - { 6.220 - if ( xsegments[loop].mode == XEN_SEGMENT_UNUSED ) 6.221 - continue; 6.222 - 6.223 - xsi->segments[xsi->count].mode = xsegments[loop].mode; 6.224 - xsi->segments[xsi->count].domain = xsegments[loop].domain; 6.225 - memcpy(xsi->segments[xsi->count].key, 6.226 - xsegments[loop].key, 6.227 - XEN_SEGMENT_KEYSIZE); 6.228 - xsi->segments[xsi->count].seg_nr = xsegments[loop].segment_number; 6.229 - xsi->count++; 6.230 - } 6.231 - spin_unlock(&xsegment_lock); 6.232 - 6.233 - unmap_domain_mem(xsi); 6.234 -} 6.235 - 6.236 -/* 6.237 - * xen_refresh_segment_list 6.238 - * 6.239 - * find all segments associated with a domain and assign 6.240 - * them to the domain 6.241 - * 6.242 - */ 6.243 -void xen_refresh_segment_list (struct task_struct *p) 6.244 -{ 6.245 - int loop; 6.246 - 6.247 - spin_lock(&xsegment_lock); 6.248 - for (loop = 0; loop < XEN_MAX_SEGMENTS; loop++) 6.249 - { 6.250 - if ( (xsegments[loop].mode == XEN_SEGMENT_UNUSED) || 6.251 - (xsegments[loop].domain != p->domain) ) 6.252 - continue; 6.253 - 6.254 - p->segment_list[xsegments[loop].segment_number] = &xsegments[loop]; 6.255 - } 6.256 - spin_unlock(&xsegment_lock); 6.257 -} 6.258 - 6.259 -/* 6.260 - * create a new segment for a domain 6.261 - * 6.262 - * return 0 on success, 1 on failure 6.263 - * 6.264 - * if we see the same DOM#/SEG# combination, we reuse the slot in 6.265 - * the segment table (overwriting what was there before). 6.266 - * an alternative would be to raise an error if the slot is reused. 
6.267 - */ 6.268 -int xen_segment_create(xv_disk_t *xvd_in) 6.269 -{ 6.270 - int idx; 6.271 - int loop; 6.272 - xv_disk_t *xvd = map_domain_mem(virt_to_phys(xvd_in)); 6.273 - struct task_struct *p; 6.274 - 6.275 - spin_lock(&xsegment_lock); 6.276 - for (idx = 0; idx < XEN_MAX_SEGMENTS; idx++) 6.277 - { 6.278 - if (xsegments[idx].mode == XEN_SEGMENT_UNUSED || 6.279 - (xsegments[idx].domain == xvd->domain && 6.280 - xsegments[idx].segment_number == xvd->segment)) break; 6.281 - } 6.282 - if (idx == XEN_MAX_SEGMENTS) 6.283 - { 6.284 - printk (KERN_ALERT "xen_segment_create: unable to find free slot\n"); 6.285 - unmap_domain_mem(xvd); 6.286 - return 1; 6.287 - } 6.288 - 6.289 - xsegments[idx].mode = xvd->mode; 6.290 - xsegments[idx].domain = xvd->domain; 6.291 - xsegments[idx].segment_number = xvd->segment; 6.292 - memcpy(xsegments[idx].key, xvd->key, XEN_SEGMENT_KEYSIZE); 6.293 - xsegments[idx].num_extents = xvd->ext_count; 6.294 - 6.295 - 6.296 - if (xsegments[idx].extents) 6.297 - kfree(xsegments[idx].extents); 6.298 - xsegments[idx].extents = (extent_t *)kmalloc( 6.299 - sizeof(extent_t)*xvd->ext_count, 6.300 - GFP_KERNEL); 6.301 - 6.302 - /* could memcpy, but this is safer */ 6.303 - for (loop = 0; loop < xvd->ext_count; loop++) 6.304 - { 6.305 - xsegments[idx].extents[loop].disk = xvd->extents[loop].disk; 6.306 - xsegments[idx].extents[loop].offset = xvd->extents[loop].offset; 6.307 - xsegments[idx].extents[loop].size = xvd->extents[loop].size; 6.308 - if (xsegments[idx].extents[loop].size == 0) 6.309 - { 6.310 - printk("xen_segment_create: extent %d is zero length\n", loop); 6.311 - unmap_domain_mem(xvd); 6.312 - return 1; 6.313 - } 6.314 - } 6.315 - 6.316 - /* if the domain exists, assign the segment to the domain */ 6.317 - p = find_domain_by_id(xvd->domain); 6.318 - if (p != NULL) 6.319 - { 6.320 - p->segment_list[xvd->segment] = &xsegments[idx]; 6.321 - put_task_struct(p); 6.322 - } 6.323 - 6.324 - spin_unlock(&xsegment_lock); 6.325 - 6.326 - unmap_domain_mem(xvd); 6.327 - return 0; 6.328 -} 6.329 - 6.330 -/* 6.331 - * delete a segment from a domain 6.332 - * 6.333 - * return 0 on success, 1 on failure 6.334 - * 6.335 - * This should *only* be called from domain shutdown, or else we 6.336 - * race with access checking. 
6.337 - */ 6.338 -int xen_segment_delete(struct task_struct *p, int segnr) 6.339 -{ 6.340 - segment_t *seg; 6.341 - 6.342 - if (!p) { 6.343 - printk("xen_segment delete called with NULL domain?\n"); 6.344 - BUG(); 6.345 - return 1; 6.346 - } 6.347 - 6.348 - if (segnr < 0 || segnr > XEN_MAX_SEGMENTS) { 6.349 - printk("xen_segment_delete called with bad segnr?\n"); 6.350 - BUG(); 6.351 - return 1; 6.352 - } 6.353 - 6.354 - if (!p->segment_list[segnr]) 6.355 - return 1; 6.356 - 6.357 - seg = p->segment_list[segnr]; 6.358 - 6.359 - /* sanity checking */ 6.360 - if (seg->domain != p->domain || seg->segment_number != segnr || 6.361 - (seg->mode != XEN_SEGMENT_RO && seg->mode != XEN_SEGMENT_RW) || 6.362 - seg->num_extents <= 0 || seg->extents == NULL) { 6.363 - printk("segment is insane!\n"); 6.364 - BUG(); 6.365 - return 1; 6.366 - } 6.367 - 6.368 - spin_lock(&xsegment_lock); 6.369 - 6.370 - p->segment_list[segnr] = NULL; 6.371 - seg->domain = -1; 6.372 - seg->segment_number = -1; 6.373 - kfree(seg->extents); 6.374 - seg->mode = XEN_SEGMENT_UNUSED; 6.375 - 6.376 - spin_unlock(&xsegment_lock); 6.377 - 6.378 - return 0; 6.379 -} 6.380 - 6.381 -static void dump_segments(u_char key, void *dev_id, struct pt_regs *regs) 6.382 -{ 6.383 - int loop, i; 6.384 - struct task_struct *p; 6.385 - 6.386 - printk("segment list\n"); 6.387 - for (loop = 0; loop < XEN_MAX_SEGMENTS; loop++) 6.388 - { 6.389 - if (xsegments[loop].mode != XEN_SEGMENT_UNUSED) 6.390 - { 6.391 - printk(" %2d: %s dom%d, seg# %d, num_exts: %d\n", 6.392 - loop, 6.393 - xsegments[loop].mode == XEN_SEGMENT_RO ? "RO" : "RW", 6.394 - xsegments[loop].domain, xsegments[loop].segment_number, 6.395 - xsegments[loop].num_extents); 6.396 - for (i = 0; i < xsegments[loop].num_extents; i++) 6.397 - { 6.398 - printk(" extent %d: disk 0x%x, offset 0x%lx, size 0x%lx\n", 6.399 - i, xsegments[loop].extents[i].disk, 6.400 - xsegments[loop].extents[i].offset, 6.401 - xsegments[loop].extents[i].size); 6.402 - } 6.403 - } 6.404 - } 6.405 - 6.406 - printk("segments by domain (index into segments list)\n"); 6.407 - p = current; 6.408 - do 6.409 - { 6.410 - printk(" domain %d: ", p->domain); 6.411 - for (loop = 0; loop < XEN_MAX_SEGMENTS; loop++) 6.412 - { 6.413 - if (p->segment_list[loop]) 6.414 - { 6.415 - printk (" %d", p->segment_list[loop] - xsegments); 6.416 - } 6.417 - } 6.418 - printk("\n"); 6.419 - p = p->next_task; 6.420 - } while (p != current); 6.421 -} 6.422 - 6.423 -/* 6.424 - * initialize segments 6.425 - */ 6.426 - 6.427 -void xen_segment_initialize(void) 6.428 -{ 6.429 - memset (xsegments, 0, sizeof(xsegments)); 6.430 - 6.431 - add_key_handler('S', dump_segments, "dump segments"); 6.432 -}
7.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 7.2 +++ b/xen/drivers/block/xen_vbd.c Wed Oct 29 16:27:23 2003 +0000 7.3 @@ -0,0 +1,666 @@ 7.4 +/* 7.5 + * xen_vbd.c : routines for managing virtual block devices 7.6 + */ 7.7 + 7.8 +#include <xeno/config.h> 7.9 +#include <xeno/types.h> 7.10 +#include <xeno/lib.h> 7.11 +#include <asm/io.h> 7.12 +#include <xeno/slab.h> 7.13 +#include <xeno/sched.h> 7.14 +#include <xeno/vbd.h> 7.15 +#include <xeno/blkdev.h> 7.16 +#include <xeno/keyhandler.h> 7.17 +#include <asm/current.h> 7.18 +#include <asm/domain_page.h> 7.19 + 7.20 +/* Global list of all possible vbds. This can be changed in 7.21 + the following way: 7.22 + 7.23 + 1) UNUSED vbd -> RO or RW vbd. This requires the spinlock. 7.24 + 7.25 + 2) RO or RW -> UNUSED. This requires the lock and can only happen 7.26 + during process teardown. 7.27 + 7.28 + This means that processes can access entries in the list safely 7.29 + without having to hold any lock at all: they already have an entry 7.30 + allocated, and we know that entry can't become unused, as vbds 7.31 + are only torn down when the domain is dieing, by which point it 7.32 + can't be accessing them anymore. */ 7.33 +static vbd_t xvbds[XEN_MAX_VBDS]; 7.34 +static spinlock_t xvbd_lock = SPIN_LOCK_UNLOCKED; 7.35 + 7.36 +#if 0 7.37 +#define DPRINTK(_f, _a...) printk( _f , ## _a ) 7.38 +#else 7.39 +#define DPRINTK(_f, _a...) ((void)0) 7.40 +#endif 7.41 + 7.42 +/* 7.43 + * xen_vbd_map_request 7.44 + * 7.45 + * xen_device must be a valid device. 7.46 + * 7.47 + * NB. All offsets and sizes here are in sector units. 7.48 + * eg. 'size == 1' means an actual size of 512 bytes. 7.49 + * 7.50 + * Note that no locking is performed here whatsoever -- 7.51 + * we rely on the fact that once vbd information is 7.52 + * established, it is only modified by domain shutdown, 7.53 + * and so if this is being called, noone is trying 7.54 + * to modify the vbd list. 7.55 + */ 7.56 +int xen_vbd_map_request( 7.57 + phys_seg_t *pseg, struct task_struct *p, int operation, 7.58 + unsigned short vbd_number, 7.59 + unsigned long sect_nr, unsigned long buffer, unsigned short nr_sects) 7.60 +{ 7.61 + vbd_t *seg; 7.62 + extent_t *ext; 7.63 + int sum, i; 7.64 + 7.65 + vbd_number &= XENDEV_IDX_MASK; 7.66 + if ( vbd_number >= XEN_MAX_VBDS ) 7.67 + { 7.68 + DPRINTK("invalid vbd number. %d %d\n", 7.69 + vbd_number, XEN_MAX_VBDS); 7.70 + goto fail; 7.71 + } 7.72 + 7.73 + seg = p->vbd_list[vbd_number]; 7.74 + if ( seg == NULL ) 7.75 + { 7.76 + DPRINTK("vbd is null. %d\n", vbd_number); 7.77 + goto fail; 7.78 + } 7.79 + 7.80 + /* check domain permissions */ 7.81 + if ( seg->domain != p->domain ) 7.82 + { 7.83 + DPRINTK("seg is for another domain. %d %d\n", seg->domain, p->domain); 7.84 + goto fail; 7.85 + } 7.86 + 7.87 + /* check rw access */ 7.88 + if ( ((operation == WRITE) && (seg->mode != XEN_VBD_RW)) || 7.89 + ((operation == READ) && (seg->mode == XEN_VBD_UNUSED)) ) 7.90 + { 7.91 + DPRINTK("illegal operation: %d %d\n", operation, seg->mode); 7.92 + goto fail; 7.93 + } 7.94 + 7.95 + if ( (nr_sects + sect_nr) <= sect_nr ) 7.96 + { 7.97 + DPRINTK("sector + size wrap! 
%08lx %04x\n", sect_nr, nr_sects); 7.98 + goto fail; 7.99 + } 7.100 + 7.101 + /* find extent, check size */ 7.102 + sum = 0; 7.103 + i = 0; 7.104 + ext = seg->extents; 7.105 + while ( (i < seg->num_extents) && ((sum + ext->nr_sectors) <= sect_nr) ) 7.106 + { 7.107 + sum += ext->nr_sectors; 7.108 + ext++; i++; 7.109 + } 7.110 + 7.111 + if ( (sum + ext->nr_sectors) <= sect_nr ) 7.112 + { 7.113 + DPRINTK("extent size mismatch: %d %d : %d %ld %ld\n", 7.114 + i, seg->num_extents, sum, ext->nr_sectors, sect_nr); 7.115 + goto fail; 7.116 + } 7.117 + 7.118 + pseg->sector_number = (sect_nr - sum) + ext->start_sector; 7.119 + pseg->buffer = buffer; 7.120 + pseg->nr_sects = nr_sects; 7.121 + pseg->dev = xendev_to_physdev(ext->raw_device); 7.122 + if ( pseg->dev == 0 ) 7.123 + { 7.124 + DPRINTK ("invalid device 0x%x 0x%lx 0x%lx\n", 7.125 + ext->raw_device, ext->start_sector, ext->nr_sectors); 7.126 + goto fail; 7.127 + } 7.128 + 7.129 + /* We're finished if the virtual extent didn't overrun the phys extent. */ 7.130 + if ( (sum + ext->nr_sectors) >= (sect_nr + nr_sects) ) 7.131 + return 1; /* entire read fits in this extent */ 7.132 + 7.133 + /* Hmmm... make sure there's another extent to overrun onto! */ 7.134 + if ( (i+1) == seg->num_extents ) 7.135 + { 7.136 + DPRINTK ("not enough extents %d %d\n", 7.137 + i, seg->num_extents); 7.138 + goto fail; 7.139 + } 7.140 + 7.141 + pseg[1].nr_sects = (sect_nr + nr_sects) - (sum + ext->nr_sectors); 7.142 + pseg[0].nr_sects = sum + ext->nr_sectors - sect_nr; 7.143 + pseg[1].buffer = buffer + (pseg->nr_sects << 9); 7.144 + pseg[1].sector_number = ext[1].start_sector; 7.145 + pseg[1].dev = xendev_to_physdev(ext[1].raw_device); 7.146 + if ( pseg[1].dev == 0 ) 7.147 + { 7.148 + DPRINTK ("bogus device for pseg[1] \n"); 7.149 + goto fail; 7.150 + } 7.151 + 7.152 + /* We don't allow overrun onto a third physical extent. */ 7.153 + if ( pseg[1].nr_sects > ext[1].nr_sectors ) 7.154 + { 7.155 + DPRINTK ("third extent\n"); 7.156 + DPRINTK (" sum:%d, e0:%ld, e1:%ld p1.sect:%ld p1.nr:%d\n", 7.157 + sum, ext[0].nr_sectors, ext[1].nr_sectors, 7.158 + pseg[1].sector_number, pseg[1].nr_sects); 7.159 + goto fail; 7.160 + } 7.161 + 7.162 + return 2; /* We overran onto a second physical extent. */ 7.163 + 7.164 + fail: 7.165 + DPRINTK ("xen_vbd_map_request failure\n"); 7.166 + DPRINTK ("operation: %d\n", operation); 7.167 + DPRINTK ("vbd number: %d\n", vbd_number); 7.168 + DPRINTK ("sect_nr: %ld 0x%lx\n", sect_nr, sect_nr); 7.169 + DPRINTK ("nr_sects: %d 0x%x\n", nr_sects, nr_sects); 7.170 + return -1; 7.171 +} 7.172 + 7.173 +/* 7.174 + * vbd_probe_devices: 7.175 + * 7.176 + * add the virtual block devices for this domain to a xen_disk_info_t; 7.177 + * we assume xdi->count points to the first unused place in the array. 
7.178 + */ 7.179 +void vbd_probe_devices(xen_disk_info_t *xdi, struct task_struct *p) 7.180 +{ 7.181 + int loop, i; 7.182 + unsigned long capacity = 0, device; 7.183 + 7.184 + spin_lock(&xvbd_lock); 7.185 + for (loop = 0; loop < XEN_MAX_VBDS; loop++ ) 7.186 + { 7.187 + if ( (xvbds[loop].mode == XEN_VBD_UNUSED) || 7.188 + (xvbds[loop].domain != p->domain) ) 7.189 + continue; 7.190 + 7.191 + device = MK_VIRTUAL_XENDEV(xvbds[loop].vbd_number); 7.192 + for ( i = 0; i < xvbds[loop].num_extents; i++ ) 7.193 + capacity += xvbds[loop].extents[i].nr_sectors; 7.194 + 7.195 + xdi->disks[xdi->count].device = device; 7.196 + xdi->disks[xdi->count].capacity = capacity; 7.197 + xdi->count++; 7.198 + } 7.199 + spin_unlock(&xvbd_lock); 7.200 + return; 7.201 +} 7.202 + 7.203 +/* 7.204 + * xen_refresh_vbd_list 7.205 + * 7.206 + * find all vbds associated with a domain and assign 7.207 + * them to the domain 7.208 + * 7.209 + */ 7.210 +void xen_refresh_vbd_list (struct task_struct *p) 7.211 +{ 7.212 + int loop; 7.213 + 7.214 + spin_lock(&xvbd_lock); 7.215 + for (loop = 0; loop < XEN_MAX_VBDS; loop++) 7.216 + { 7.217 + if ( (xvbds[loop].mode == XEN_VBD_UNUSED) || 7.218 + (xvbds[loop].domain != p->domain) ) 7.219 + continue; 7.220 + 7.221 + p->vbd_list[xvbds[loop].vbd_number] = &xvbds[loop]; 7.222 + } 7.223 + spin_unlock(&xvbd_lock); 7.224 +} 7.225 + 7.226 +/* 7.227 + * create a new vbd for a domain 7.228 + * 7.229 + * return 0 on success, 1 on failure 7.230 + * 7.231 + * if we see the same DOM#/SEG# combination, we reuse the slot in 7.232 + * the vbd table (overwriting what was there before). 7.233 + * an alternative would be to raise an error if the slot is reused. 7.234 + */ 7.235 +int xen_vbd_create(xv_disk_t *xvd_in) 7.236 +{ 7.237 + int idx; 7.238 + int loop; 7.239 + xv_disk_t *xvd = map_domain_mem(virt_to_phys(xvd_in)); 7.240 + struct task_struct *p; 7.241 + 7.242 + spin_lock(&xvbd_lock); 7.243 + for (idx = 0; idx < XEN_MAX_VBDS; idx++) 7.244 + { 7.245 + if (xvbds[idx].mode == XEN_VBD_UNUSED || 7.246 + (xvbds[idx].domain == xvd->domain && 7.247 + xvbds[idx].vbd_number == xvd->vbd)) break; 7.248 + } 7.249 + if (idx == XEN_MAX_VBDS) 7.250 + { 7.251 + printk (KERN_ALERT "xen_vbd_create: unable to find free slot\n"); 7.252 + unmap_domain_mem(xvd); 7.253 + return 1; 7.254 + } 7.255 + 7.256 + xvbds[idx].mode = xvd->mode; 7.257 + xvbds[idx].domain = xvd->domain; 7.258 + xvbds[idx].vbd_number = xvd->vbd; 7.259 + memcpy(xvbds[idx].key, xvd->key, XEN_VBD_KEYSIZE); 7.260 + xvbds[idx].num_extents = xvd->ext_count; 7.261 + 7.262 + 7.263 + if (xvbds[idx].extents) 7.264 + kfree(xvbds[idx].extents); 7.265 + xvbds[idx].extents = (extent_t *)kmalloc( 7.266 + sizeof(extent_t)*xvd->ext_count, 7.267 + GFP_KERNEL); 7.268 + 7.269 + /* could memcpy, but this is safer */ 7.270 + for (loop = 0; loop < xvd->ext_count; loop++) 7.271 + { 7.272 + xvbds[idx].extents[loop].raw_device = xvd->extents[loop].disk; 7.273 + xvbds[idx].extents[loop].start_sector = 7.274 + xvd->extents[loop].offset; 7.275 + xvbds[idx].extents[loop].nr_sectors = xvd->extents[loop].size; 7.276 + if (xvbds[idx].extents[loop].nr_sectors == 0) 7.277 + { 7.278 + printk("xen_vbd_create: extent %d is zero length\n", loop); 7.279 + unmap_domain_mem(xvd); 7.280 + return 1; 7.281 + } 7.282 + } 7.283 + 7.284 + /* if the domain exists, assign the vbd to the domain */ 7.285 + p = find_domain_by_id(xvd->domain); 7.286 + if (p != NULL) 7.287 + { 7.288 + p->vbd_list[xvd->vbd] = &xvbds[idx]; 7.289 + put_task_struct(p); 7.290 + } 7.291 + 7.292 + spin_unlock(&xvbd_lock); 7.293 + 
7.294 + unmap_domain_mem(xvd); 7.295 + return 0; 7.296 +} 7.297 + 7.298 +/* 7.299 + * delete a vbd from a domain 7.300 + * 7.301 + * return 0 on success, 1 on failure 7.302 + * 7.303 + * This should *only* be called from domain shutdown, or else we 7.304 + * race with access checking. 7.305 + */ 7.306 +int xen_vbd_delete(struct task_struct *p, int segnr) 7.307 +{ 7.308 + vbd_t *seg; 7.309 + 7.310 + if (!p) { 7.311 + printk("xen_vbd delete called with NULL domain?\n"); 7.312 + BUG(); 7.313 + return 1; 7.314 + } 7.315 + 7.316 + if (segnr < 0 || segnr > XEN_MAX_VBDS) { 7.317 + printk("xen_vbd_delete called with bad segnr?\n"); 7.318 + BUG(); 7.319 + return 1; 7.320 + } 7.321 + 7.322 + if (!p->vbd_list[segnr]) 7.323 + return 1; 7.324 + 7.325 + seg = p->vbd_list[segnr]; 7.326 + 7.327 + /* sanity checking */ 7.328 + if (seg->domain != p->domain || seg->vbd_number != segnr || 7.329 + (seg->mode != XEN_VBD_RO && seg->mode != XEN_VBD_RW) || 7.330 + seg->num_extents <= 0 || seg->extents == NULL) { 7.331 + printk("vbd is insane!\n"); 7.332 + BUG(); 7.333 + return 1; 7.334 + } 7.335 + 7.336 + spin_lock(&xvbd_lock); 7.337 + 7.338 + p->vbd_list[segnr] = NULL; 7.339 + seg->domain = -1; 7.340 + seg->vbd_number = -1; 7.341 + kfree(seg->extents); 7.342 + seg->mode = XEN_VBD_UNUSED; 7.343 + 7.344 + spin_unlock(&xvbd_lock); 7.345 + 7.346 + return 0; 7.347 +} 7.348 + 7.349 +static void dump_vbds(u_char key, void *dev_id, struct pt_regs *regs) 7.350 +{ 7.351 + int loop, i; 7.352 + struct task_struct *p; 7.353 + 7.354 + printk("vbd list\n"); 7.355 + for (loop = 0; loop < XEN_MAX_VBDS; loop++) 7.356 + { 7.357 + if (xvbds[loop].mode != XEN_VBD_UNUSED) 7.358 + { 7.359 + printk(" %2d: %s dom%d, seg# %d, num_exts: %d\n", 7.360 + loop, 7.361 + xvbds[loop].mode == XEN_VBD_RO ? "RO" : "RW", 7.362 + xvbds[loop].domain, xvbds[loop].vbd_number, 7.363 + xvbds[loop].num_extents); 7.364 + for (i = 0; i < xvbds[loop].num_extents; i++) 7.365 + { 7.366 + printk(" extent %d: raw device 0x%x, start_sector 0x%lx" 7.367 + " nr_sectors 0x%lx\n", 7.368 + i, xvbds[loop].extents[i].raw_device, 7.369 + xvbds[loop].extents[i].start_sector, 7.370 + xvbds[loop].extents[i].nr_sectors); 7.371 + } 7.372 + } 7.373 + } 7.374 + 7.375 + printk("vbds by domain (index into vbds list)\n"); 7.376 + p = current; 7.377 + do 7.378 + { 7.379 + if(is_idle_task(p)) 7.380 + continue; 7.381 + 7.382 + printk(" domain %d: ", p->domain); 7.383 + for (loop = 0; loop < XEN_MAX_VBDS; loop++) 7.384 + { 7.385 + if (p->vbd_list[loop]) 7.386 + { 7.387 + printk (" %d", p->vbd_list[loop] - xvbds); 7.388 + } 7.389 + } 7.390 + printk("\n"); 7.391 + p = p->next_task; 7.392 + } while (p != current); 7.393 +} 7.394 + 7.395 +/* 7.396 + * initialize vbds 7.397 + */ 7.398 + 7.399 +void xen_vbd_initialize(void) 7.400 +{ 7.401 + memset (xvbds, 0, sizeof(xvbds)); 7.402 + 7.403 + add_key_handler('S', dump_vbds, "dump vbds"); 7.404 +} 7.405 + 7.406 + 7.407 +/* The idea is that, for each sector of each disk, each domain has two 7.408 + bits, saying whether they can read the sector or write it. That 7.409 + would take too much memory, so instead each process has a list of 7.410 + (device, start, end, mode) quads which say what it has access to, 7.411 + and we fake the logical view on top of that. */ 7.412 +struct physdisk_ace { 7.413 + struct list_head list; 7.414 + unsigned short device; 7.415 + unsigned short partition; 7.416 + unsigned long start_sect; 7.417 + unsigned long n_sectors; 7.418 + int mode; 7.419 +}; 7.420 + 7.421 + 7.422 +/* Operation is a blkdev constant i.e. 
READ, WRITE, ... */ 7.423 +/* Must be called with p->physdev_lock held. */ 7.424 +static struct physdisk_ace *find_ace(const struct task_struct *p, 7.425 + unsigned short dev, 7.426 + unsigned long sect, int operation) 7.427 +{ 7.428 + struct list_head *cur_ace_head; 7.429 + struct physdisk_ace *cur_ace; 7.430 + 7.431 + list_for_each(cur_ace_head, &p->physdisk_aces) 7.432 + { 7.433 + cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list); 7.434 + DPRINTK("Is [%lx, %lx) good for %lx?\n", 7.435 + cur_ace->start_sect, 7.436 + cur_ace->start_sect + cur_ace->n_sectors, sect); 7.437 + if ( (sect >= cur_ace->start_sect) && 7.438 + (sect < (cur_ace->start_sect + cur_ace->n_sectors)) && 7.439 + (dev == cur_ace->device) && 7.440 + (((operation == READ) && (cur_ace->mode & PHYSDISK_MODE_R)) || 7.441 + ((operation == WRITE) && (cur_ace->mode & PHYSDISK_MODE_W))) ) 7.442 + return cur_ace; 7.443 + } 7.444 + return NULL; 7.445 +} 7.446 + 7.447 +/* Hold the lock on entry, it remains held on exit. */ 7.448 +static void xen_physdisk_revoke_access(unsigned short dev, 7.449 + unsigned long start_sect, 7.450 + unsigned long n_sectors, 7.451 + struct task_struct *p) 7.452 +{ 7.453 + /* Find every ace which intersects [start_sect, start_sect + 7.454 + n_sectors] and either remove it completely or truncate it 7.455 + down. */ 7.456 + struct list_head *cur_ace_head; 7.457 + struct physdisk_ace *cur_ace, *new_ace; 7.458 + unsigned long kill_zone_end, ace_end; 7.459 + 7.460 + kill_zone_end = start_sect + n_sectors; 7.461 + list_for_each(cur_ace_head, &p->physdisk_aces) 7.462 + { 7.463 + cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list); 7.464 + ace_end = cur_ace->start_sect + cur_ace->n_sectors; 7.465 + if ( (cur_ace->start_sect >= kill_zone_end) || 7.466 + (ace_end <= start_sect) || 7.467 + (cur_ace->device != dev) ) 7.468 + continue; 7.469 + 7.470 + DPRINTK("Killing ace [%lx, %lx) against kill zone [%lx, %lx)\n", 7.471 + cur_ace->start_sect, ace_end, start_sect, kill_zone_end); 7.472 + 7.473 + if ( (cur_ace->start_sect >= start_sect) && 7.474 + (ace_end <= kill_zone_end) ) 7.475 + { 7.476 + /* ace entirely within kill zone -> kill it */ 7.477 + list_del(cur_ace_head); 7.478 + cur_ace_head = cur_ace_head->prev; 7.479 + kfree(cur_ace); 7.480 + } 7.481 + else if ( ace_end <= kill_zone_end ) 7.482 + { 7.483 + /* ace start before kill start, ace end in kill zone, 7.484 + move ace end. */ 7.485 + cur_ace->n_sectors = start_sect - cur_ace->start_sect; 7.486 + } 7.487 + else if ( cur_ace->start_sect >= start_sect ) 7.488 + { 7.489 + /* ace start after kill start, ace end outside kill zone, 7.490 + move ace start. */ 7.491 + cur_ace->start_sect = kill_zone_end; 7.492 + cur_ace->n_sectors = ace_end - cur_ace->start_sect; 7.493 + } 7.494 + else 7.495 + { 7.496 + /* The fun one: the ace entirely includes the kill zone. */ 7.497 + /* Cut the current ace down to just the bit before the kzone, 7.498 + create a new ace for the bit just after it. */ 7.499 + new_ace = kmalloc(sizeof(*cur_ace), GFP_KERNEL); 7.500 + new_ace->device = dev; 7.501 + new_ace->start_sect = kill_zone_end; 7.502 + new_ace->n_sectors = ace_end - kill_zone_end; 7.503 + new_ace->mode = cur_ace->mode; 7.504 + 7.505 + cur_ace->n_sectors = start_sect - cur_ace->start_sect; 7.506 + 7.507 + list_add(&new_ace->list, cur_ace_head); 7.508 + } 7.509 + } 7.510 +} 7.511 + 7.512 +/* Hold the lock on entry, it remains held on exit. 
*/ 7.513 +static int xen_physdisk_grant_access(unsigned short dev, 7.514 + unsigned short partition, 7.515 + unsigned long start_sect, 7.516 + unsigned long n_sectors, 7.517 + int mode, struct task_struct *p) 7.518 +{ 7.519 + struct physdisk_ace *cur_ace; 7.520 + 7.521 + /* Make sure it won't overlap with any existing ACEs. */ 7.522 + /* XXX this isn't quite right if the domain already has read access 7.523 + and we try to grant write access, or vice versa. */ 7.524 + xen_physdisk_revoke_access(dev, start_sect, n_sectors, p); 7.525 + 7.526 + if ( mode ) 7.527 + { 7.528 + cur_ace = kmalloc(sizeof(*cur_ace), GFP_KERNEL); 7.529 + cur_ace->device = dev; 7.530 + cur_ace->start_sect = start_sect; 7.531 + cur_ace->n_sectors = n_sectors; 7.532 + cur_ace->mode = mode; 7.533 + cur_ace->partition = partition; 7.534 + 7.535 + list_add_tail(&cur_ace->list, &p->physdisk_aces); 7.536 + } 7.537 + 7.538 + return 0; 7.539 +} 7.540 + 7.541 +static void xen_physdisk_probe_access(physdisk_probebuf_t * buf, 7.542 + struct task_struct *p) 7.543 +{ 7.544 + int n_aces; 7.545 + struct list_head *cur_ace_head; 7.546 + struct physdisk_ace *cur_ace; 7.547 + int x = 0; 7.548 + 7.549 + n_aces = 0; 7.550 + list_for_each(cur_ace_head, &p->physdisk_aces) 7.551 + { 7.552 + x++; 7.553 + if ( x >= buf->start_ind ) 7.554 + { 7.555 + cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list); 7.556 + buf->entries[n_aces].device = cur_ace->device; 7.557 + buf->entries[n_aces].partition = cur_ace->partition; 7.558 + buf->entries[n_aces].start_sect = cur_ace->start_sect; 7.559 + buf->entries[n_aces].n_sectors = cur_ace->n_sectors; 7.560 + buf->entries[n_aces].mode = cur_ace->mode; 7.561 + n_aces++; 7.562 + } 7.563 + } 7.564 + buf->n_aces = n_aces; 7.565 +} 7.566 + 7.567 +int xen_physdisk_grant(xp_disk_t * xpd_in) 7.568 +{ 7.569 + struct task_struct *p = current; 7.570 + xp_disk_t *xpd = map_domain_mem(virt_to_phys(xpd_in)); 7.571 + int res; 7.572 + 7.573 + p = find_domain_by_id(xpd->domain); 7.574 + if ( p == NULL ) 7.575 + { 7.576 + DPRINTK("Bad domain!\n"); 7.577 + res = 1; 7.578 + goto out; 7.579 + } 7.580 + 7.581 + spin_lock(&p->physdev_lock); 7.582 + res = xen_physdisk_grant_access(xpd->device, 7.583 + xpd->partition, 7.584 + xpd->start_sect, 7.585 + xpd->n_sectors, xpd->mode, p); 7.586 + spin_unlock(&p->physdev_lock); 7.587 + put_task_struct(p); 7.588 + 7.589 + out: 7.590 + unmap_domain_mem(xpd); 7.591 + return res; 7.592 +} 7.593 + 7.594 +int xen_physdisk_probe(struct task_struct *requesting_domain, 7.595 + physdisk_probebuf_t * buf_in) 7.596 +{ 7.597 + struct task_struct *p; 7.598 + physdisk_probebuf_t *buf = map_domain_mem(virt_to_phys(buf_in)); 7.599 + int res; 7.600 + 7.601 + if ( (requesting_domain->domain != 0) && 7.602 + (requesting_domain->domain != buf->domain) ) 7.603 + { 7.604 + res = 1; 7.605 + goto out; 7.606 + } 7.607 + 7.608 + p = find_domain_by_id(buf->domain); 7.609 + if ( p == NULL ) 7.610 + { 7.611 + res = 1; 7.612 + goto out; 7.613 + } 7.614 + 7.615 + spin_lock(&p->physdev_lock); 7.616 + xen_physdisk_probe_access(buf, p); 7.617 + spin_unlock(&p->physdev_lock); 7.618 + put_task_struct(p); 7.619 + 7.620 + res = 0; 7.621 + out: 7.622 + unmap_domain_mem(buf); 7.623 + return res; 7.624 +} 7.625 + 7.626 +#define MAX(a,b) ((a) > (b) ? 
(a) : (b)) 7.627 + 7.628 +int xen_physdisk_access_okay(phys_seg_t * pseg, struct task_struct *p, 7.629 + int operation) 7.630 +{ 7.631 + struct physdisk_ace *cur_ace; 7.632 + unsigned long sect; 7.633 + 7.634 + DPRINTK 7.635 + ("Checking access for domain %d, start sect 0x%lx, length 0x%x.\n", 7.636 + p->domain, pseg->sector_number, pseg->nr_sects); 7.637 + 7.638 + for ( sect = pseg->sector_number; 7.639 + sect < pseg->sector_number + pseg->nr_sects; ) 7.640 + { 7.641 + /* XXX this would be a lot faster if the aces were sorted on start 7.642 + address. Also in revoke_access. */ 7.643 + spin_lock(&p->physdev_lock); 7.644 + cur_ace = find_ace(p, pseg->dev, sect, operation); 7.645 + spin_unlock(&p->physdev_lock); 7.646 + if ( cur_ace == NULL ) 7.647 + return 0; 7.648 + sect += 7.649 + MAX(cur_ace->n_sectors, 7.650 + pseg->nr_sects + pseg->sector_number - sect); 7.651 + } 7.652 + return 1; 7.653 +} 7.654 + 7.655 +void destroy_physdisk_aces(struct task_struct *p) 7.656 +{ 7.657 + struct list_head *cur_ace_head, *next_head; 7.658 + struct physdisk_ace *cur_ace; 7.659 + 7.660 + for ( cur_ace_head = p->physdisk_aces.next; 7.661 + cur_ace_head != &p->physdisk_aces; 7.662 + cur_ace_head = next_head ) 7.663 + { 7.664 + cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list); 7.665 + next_head = cur_ace_head->next; 7.666 + kfree(cur_ace); 7.667 + } 7.668 +} 7.669 +
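The trickiest part of the new xen_vbd.c is the access-control-entry (ACE) handling above: xen_physdisk_revoke_access punches a "kill zone" out of every ACE that intersects it, which can delete an ACE outright, truncate either end, or split it into two pieces, and xen_physdisk_access_okay then checks a request's sector range against whatever ACEs survive (the XXX comment notes both paths would be faster over a sorted list). The following is a minimal standalone model of just that interval arithmetic, assuming half-open ranges [start, start+len); it is an illustration for the reader, not hypervisor code, and omits the list handling, locking, and device/mode checks.

#include <stdio.h>

struct ace { unsigned long start, len; };        /* covers [start, start+len) */

/* Remove the kill zone [ks, ke) from 'in'; write the surviving pieces to
 * 'out' and return how many there are (0, 1 or 2).  The five cases mirror
 * the ones in xen_physdisk_revoke_access(). */
static int revoke(struct ace in, unsigned long ks, unsigned long ke,
                  struct ace out[2])
{
    unsigned long ae = in.start + in.len;        /* ace end */
    int n = 0;

    if (in.start >= ke || ae <= ks) {            /* no overlap: keep as-is */
        out[n++] = in;
    } else if (in.start >= ks && ae <= ke) {
        /* entirely inside the kill zone: nothing survives */
    } else if (ae <= ke) {                       /* tail overlaps: truncate end */
        in.len = ks - in.start;
        out[n++] = in;
    } else if (in.start >= ks) {                 /* head overlaps: move start up */
        in.len = ae - ke;
        in.start = ke;
        out[n++] = in;
    } else {                                     /* kill zone strictly inside */
        out[n].start = in.start; out[n].len = ks - in.start; n++;  /* piece before */
        out[n].start = ke;       out[n].len = ae - ke;       n++;  /* piece after  */
    }
    return n;
}

int main(void)
{
    struct ace a = { 0, 100 }, pieces[2];
    int i, n = revoke(a, 20, 30, pieces);        /* splits [0,100) around [20,30) */

    for (i = 0; i < n; i++)
        printf("surviving extent: [%lu, %lu)\n",
               pieces[i].start, pieces[i].start + pieces[i].len);
    return 0;
}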
8.1 --- a/xen/drivers/ide/ide-xeno.c Tue Oct 14 10:42:54 2003 +0000 8.2 +++ b/xen/drivers/ide/ide-xeno.c Wed Oct 29 16:27:23 2003 +0000 8.3 @@ -2,8 +2,7 @@ 8.4 #include <xeno/types.h> 8.5 #include <xeno/lib.h> 8.6 #include <xeno/ide.h> 8.7 -#include <xeno/segment.h> 8.8 -#include <hypervisor-ifs/block.h> 8.9 +#include <xeno/vbd.h> 8.10 #include <asm/domain_page.h> 8.11 #include <asm/io.h> 8.12 8.13 @@ -11,7 +10,6 @@ void ide_probe_devices(xen_disk_info_t* 8.14 { 8.15 int loop; 8.16 unsigned int unit; 8.17 - xen_disk_info_t *xen_xdi = map_domain_mem(virt_to_phys(xdi)); 8.18 unsigned long capacity; 8.19 unsigned short device, type; 8.20 ide_drive_t *drive; 8.21 @@ -36,17 +34,17 @@ void ide_probe_devices(xen_disk_info_t* 8.22 device = MK_IDE_XENDEV((loop * MAX_DRIVES) + unit); 8.23 capacity = current_capacity(drive); 8.24 8.25 - xen_xdi->disks[xen_xdi->count].device = device; 8.26 - xen_xdi->disks[xen_xdi->count].type = type; 8.27 - xen_xdi->disks[xen_xdi->count].capacity = capacity; 8.28 - xen_xdi->count++; 8.29 + xdi->disks[xdi->count].device = device; 8.30 + xdi->disks[xdi->count].type = type; 8.31 + xdi->disks[xdi->count].capacity = capacity; 8.32 + xdi->count++; 8.33 8.34 printk("Device %d: IDE-XENO (%s) capacity %ldkB (%ldMB)\n", 8.35 - xen_xdi->count, (type == XD_TYPE_DISK) ? "disk" : 8.36 + xdi->count, (type == XD_TYPE_DISK) ? "disk" : 8.37 ((type == XD_TYPE_CDROM) ? "cdrom" : "unknown"), 8.38 capacity>>1, capacity>>11); 8.39 } 8.40 } 8.41 - 8.42 - unmap_domain_mem(xen_xdi); 8.43 + 8.44 + return; 8.45 }
9.1 --- a/xen/drivers/scsi/sd.c Tue Oct 14 10:42:54 2003 +0000 9.2 +++ b/xen/drivers/scsi/sd.c Wed Oct 29 16:27:23 2003 +0000 9.3 @@ -1310,16 +1310,21 @@ static void sd_finish() 9.4 9.5 9.6 /* 9.7 -** XXX SMH: gross 'probe' function to allow xeno world to grope us; 9.8 -** this should really not be in the disk-specific code as it should 9.9 -** report tapes, CDs, etc. But for now this looks like the easiest 9.10 -** place to hook it in :-( 9.11 +** scsi_probe_devices: 9.12 +** 9.13 +** add the scsi block devices for this domain to a xen_disk_info_t; 9.14 +** we assume xdi->count points to the first unused place in the array. 9.15 +** 9.16 +** XXX SMH: this is a rather gross 'probe' function to allow xeno world 9.17 +** to grope us; this should really not be in the disk-specific code as 9.18 +** it should report tapes, CDs, etc. But for now this looks like the 9.19 +** easiest place to hook it in :-( 9.20 +** 9.21 */ 9.22 void scsi_probe_devices(xen_disk_info_t *xdi) 9.23 { 9.24 Scsi_Disk *sd; 9.25 int i; 9.26 - xen_disk_info_t *xen_xdi = map_domain_mem(virt_to_phys(xdi)); 9.27 unsigned long capacity, device; 9.28 9.29 for ( sd = rscsi_disks, i = 0; i < sd_template.dev_max; i++, sd++ ) 9.30 @@ -1330,16 +1335,16 @@ void scsi_probe_devices(xen_disk_info_t 9.31 capacity = sd->capacity; 9.32 9.33 /* XXX SMH: if make generic, need to properly determine 'type' */ 9.34 - xen_xdi->disks[xen_xdi->count].device = device; 9.35 - xen_xdi->disks[xen_xdi->count].type = XD_TYPE_DISK; 9.36 - xen_xdi->disks[xen_xdi->count].capacity = capacity; 9.37 - xen_xdi->count++; 9.38 + xdi->disks[xdi->count].device = device; 9.39 + xdi->disks[xdi->count].type = XD_TYPE_DISK; 9.40 + xdi->disks[xdi->count].capacity = capacity; 9.41 + xdi->count++; 9.42 9.43 printk("Device %d: SCSI-XENO (disk) capacity %ldkB (%ldMB)\n", 9.44 - xen_xdi->count, capacity>>1, capacity>>11); 9.45 + xdi->count, capacity>>1, capacity>>11); 9.46 } 9.47 9.48 - unmap_domain_mem(xen_xdi); 9.49 + return; 9.50 } 9.51 9.52
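Both probe routines now follow the convention spelled out in the rewritten scsi_probe_devices comment: the caller passes in a xen_disk_info_t that is already mapped, xdi->count names the first unused slot, and each probe simply appends and advances the counter (the map_domain_mem/unmap_domain_mem pair is gone from the probes themselves). A small standalone model of that append convention is sketched below; the struct is simplified, the device numbers are made up, and the kB/MB arithmetic (capacity>>1 and capacity>>11) assumes capacities counted in 512-byte sectors, as in the printks above.

#include <stdio.h>

#define MAX_DISKS 8

typedef struct {
    int count;                                   /* first unused slot */
    struct { unsigned short device; unsigned long capacity; } disks[MAX_DISKS];
} disk_info_t;

/* Each probe appends at disks[count] and bumps count, so the caller can
 * chain IDE, SCSI and VBD probes over one buffer. */
static void fake_ide_probe(disk_info_t *xdi)
{
    xdi->disks[xdi->count].device   = 0x0100;    /* illustrative ids  */
    xdi->disks[xdi->count].capacity = 4096;      /* 512-byte sectors  */
    xdi->count++;
}

static void fake_scsi_probe(disk_info_t *xdi)
{
    xdi->disks[xdi->count].device   = 0x0200;
    xdi->disks[xdi->count].capacity = 8192;
    xdi->count++;
}

int main(void)
{
    disk_info_t xdi = { 0 };
    int i;

    fake_ide_probe(&xdi);
    fake_scsi_probe(&xdi);

    for (i = 0; i < xdi.count; i++)
        printf("disk %d: device %#x, capacity %lukB (%luMB)\n",
               i, (unsigned)xdi.disks[i].device,
               xdi.disks[i].capacity >> 1, xdi.disks[i].capacity >> 11);
    return 0;
}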
10.1 --- a/xen/include/hypervisor-ifs/block.h Tue Oct 14 10:42:54 2003 +0000 10.2 +++ b/xen/include/hypervisor-ifs/block.h Wed Oct 29 16:27:23 2003 +0000 10.3 @@ -42,16 +42,14 @@ 10.4 #define XEN_BLOCK_WRITE 1 10.5 #define XEN_BLOCK_READA 2 10.6 #define XEN_BLOCK_SPECIAL 4 10.7 -#define XEN_BLOCK_PROBE_BLK 5 /* get xhd config from hypervisor */ 10.8 -#define XEN_BLOCK_DEBUG 6 /* debug */ 10.9 -#define XEN_BLOCK_SEG_CREATE 7 /* create segment (vhd) */ 10.10 -#define XEN_BLOCK_SEG_DELETE 8 /* delete segment (vhd) */ 10.11 -#define XEN_BLOCK_PROBE_SEG 9 /* get vhd config from hypervisor */ 10.12 +#define XEN_BLOCK_PROBE 5 /* get config from hypervisor */ 10.13 +#define XEN_BLOCK_DEBUG 6 /* debug */ 10.14 +#define XEN_BLOCK_VBD_CREATE 7 /* create vbd */ 10.15 +#define XEN_BLOCK_VBD_DELETE 8 /* delete vbd */ 10.16 + /* XXX SMH: was 'probe vbd' */ 10.17 #define XEN_BLOCK_PHYSDEV_GRANT 10 /* grant access to range of disk blocks */ 10.18 -#define XEN_BLOCK_PHYSDEV_PROBE 11 /* probe for a domain's physdev 10.19 - accesses */ 10.20 -#define XEN_BLOCK_PROBE_SEG_ALL 12 /* prove for every domain's segments, 10.21 - not just ours. */ 10.22 +#define XEN_BLOCK_PHYSDEV_PROBE 11 /* probe for a domain's physdev accesses */ 10.23 + /* XXX SMH: was 'probe vbd all' */ 10.24 10.25 /* NB. Ring size must be small enough for sizeof(blk_ring_t) <= PAGE_SIZE. */ 10.26 #define BLK_RING_SIZE 64 10.27 @@ -119,59 +117,4 @@ typedef struct xen_disk_info 10.28 xen_disk_t disks[XEN_MAX_DISK_COUNT]; 10.29 } xen_disk_info_t; 10.30 10.31 -/* 10.32 - * 10.33 - * virtual disk (vhd) structures, used by XEN_BLOCK_SEG_{CREATE, DELETE} 10.34 - * 10.35 - */ 10.36 - 10.37 -#define XEN_DISK_READ_WRITE 1 10.38 -#define XEN_DISK_READ_ONLY 2 10.39 - 10.40 -typedef struct xv_extent 10.41 -{ 10.42 - int disk; /* physical disk number */ 10.43 - unsigned long offset; /* offset in blocks into physical disk */ 10.44 - unsigned long size; /* size in blocks */ 10.45 -} xv_extent_t; 10.46 - 10.47 -#define XEN_SEGMENT_KEYSIZE 10 10.48 - 10.49 -typedef struct xv_disk 10.50 -{ 10.51 - int mode; /* XEN_DISK_READ_WRITE or XEN_DISK_READ_ONLY */ 10.52 - int domain; /* domain */ 10.53 - int segment; /* segment number */ 10.54 - char key[XEN_SEGMENT_KEYSIZE]; /* key for benefit of dom0 userspace */ 10.55 - int ext_count; /* number of xv_extent_t to follow */ 10.56 - xv_extent_t extents[XEN_MAX_DISK_COUNT]; /* arbitrary reuse of constant */ 10.57 -} xv_disk_t; 10.58 - 10.59 -#define PHYSDISK_MODE_R 1 10.60 -#define PHYSDISK_MODE_W 2 10.61 -typedef struct xp_disk 10.62 -{ 10.63 - int mode; /* 0 -> revoke existing access, otherwise bitmask of 10.64 - PHYSDISK_MODE_? constants */ 10.65 - int domain; 10.66 - unsigned short device; /* XENDEV_??? + idx */ 10.67 - unsigned short partition; /* partition number */ 10.68 - unsigned long start_sect; 10.69 - unsigned long n_sectors; 10.70 -} xp_disk_t; 10.71 - 10.72 -#define PHYSDISK_MAX_ACES_PER_REQUEST 254 /* Make it fit in one page */ 10.73 -typedef struct { 10.74 - int n_aces; 10.75 - int domain; 10.76 - int start_ind; 10.77 - struct { 10.78 - unsigned short device; /* XENDEV_??? + idx */ 10.79 - unsigned short partition; /* partition number */ 10.80 - unsigned long start_sect; 10.81 - unsigned long n_sectors; 10.82 - unsigned mode; 10.83 - } entries[PHYSDISK_MAX_ACES_PER_REQUEST]; 10.84 -} physdisk_probebuf_t; 10.85 - 10.86 #endif
11.1 --- a/xen/include/hypervisor-ifs/segment.h Tue Oct 14 10:42:54 2003 +0000 11.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 11.3 @@ -1,21 +0,0 @@ 11.4 -#ifndef __HYP_IFS_SEGMENT_H__ 11.5 -#define __HYP_IFS_SEGMENT_H__ 11.6 - 11.7 -#define XEN_MAX_SEGMENTS 100 /* total number of segments across all doms */ 11.8 - 11.9 -#define XEN_SEGMENT_UNUSED 0 /* bzero default */ 11.10 -#define XEN_SEGMENT_RO XEN_DISK_READ_ONLY 11.11 -#define XEN_SEGMENT_RW XEN_DISK_READ_WRITE 11.12 - 11.13 -typedef struct xen_segment_info 11.14 -{ 11.15 - int count; 11.16 - struct { 11.17 - unsigned domain; 11.18 - unsigned seg_nr; 11.19 - char key[XEN_SEGMENT_KEYSIZE]; 11.20 - unsigned short mode; /* UNUSED, RO, or RW. */ 11.21 - } segments[XEN_MAX_SEGMENTS]; 11.22 -} xen_segment_info_t; 11.23 - 11.24 -#endif /* __HYP_IFS_SEGMENT_H__ */
12.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 12.2 +++ b/xen/include/hypervisor-ifs/vbd.h Wed Oct 29 16:27:23 2003 +0000 12.3 @@ -0,0 +1,115 @@ 12.4 +#ifndef __HYP_IFS_VBD_H__ 12.5 +#define __HYP_IFS_VBD_H__ 12.6 + 12.7 +#define XEN_MAX_VBDS 100 /* total number of vbds across all doms */ 12.8 + 12.9 +#define XEN_VBD_UNUSED 0 /* bzero default */ 12.10 +#define XEN_DISK_READ_WRITE 1 12.11 +#define XEN_DISK_READ_ONLY 2 12.12 +#define XEN_VBD_RO XEN_DISK_READ_ONLY 12.13 +#define XEN_VBD_RW XEN_DISK_READ_WRITE 12.14 + 12.15 +/* 12.16 + * 12.17 + * virtual disk (vhd) structures, used by XEN_BLOCK_VBD_{CREATE, DELETE} 12.18 + * 12.19 + */ 12.20 + 12.21 +typedef struct xv_extent 12.22 +{ 12.23 + int disk; /* physical disk number */ 12.24 + unsigned long offset; /* offset in blocks into physical disk */ 12.25 + unsigned long size; /* size in blocks */ 12.26 +} xv_extent_t; 12.27 + 12.28 +#define XEN_VBD_KEYSIZE 10 12.29 + 12.30 +typedef struct xv_disk 12.31 +{ 12.32 + int mode; /* XEN_DISK_READ_WRITE or XEN_DISK_READ_ONLY */ 12.33 + int domain; /* domain */ 12.34 + int vbd; /* segment number */ 12.35 + char key[XEN_VBD_KEYSIZE]; /* key for benefit of dom0 userspace */ 12.36 + int ext_count; /* number of xv_extent_t to follow */ 12.37 + xv_extent_t extents[XEN_MAX_DISK_COUNT]; /* arbitrary reuse of constant */ 12.38 +} xv_disk_t; 12.39 + 12.40 +#define PHYSDISK_MODE_R 1 12.41 +#define PHYSDISK_MODE_W 2 12.42 +typedef struct xp_disk 12.43 +{ 12.44 + int mode; /* 0 -> revoke existing access, otherwise bitmask of 12.45 + PHYSDISK_MODE_? constants */ 12.46 + int domain; 12.47 + unsigned short device; /* XENDEV_??? + idx */ 12.48 + unsigned short partition; /* partition number */ 12.49 + unsigned long start_sect; 12.50 + unsigned long n_sectors; 12.51 +} xp_disk_t; 12.52 + 12.53 +#define PHYSDISK_MAX_ACES_PER_REQUEST 254 /* Make it fit in one page */ 12.54 +typedef struct { 12.55 + int n_aces; 12.56 + int domain; 12.57 + int start_ind; 12.58 + struct { 12.59 + unsigned short device; /* XENDEV_??? + idx */ 12.60 + unsigned short partition; /* partition number */ 12.61 + unsigned long start_sect; 12.62 + unsigned long n_sectors; 12.63 + unsigned mode; 12.64 + } entries[PHYSDISK_MAX_ACES_PER_REQUEST]; 12.65 +} physdisk_probebuf_t; 12.66 + 12.67 + 12.68 +typedef struct xen_vbd_info 12.69 +{ 12.70 + int count; 12.71 + struct { 12.72 + unsigned domain; 12.73 + unsigned seg_nr; 12.74 + char key[XEN_VBD_KEYSIZE]; 12.75 + unsigned short mode; /* UNUSED, RO, or RW. */ 12.76 + } vbds[XEN_MAX_VBDS]; 12.77 +} xen_vbd_info_t; 12.78 + 12.79 + 12.80 + 12.81 +/* Block I/O trap operations and associated structures. 
12.82 + */ 12.83 + 12.84 +#define BLOCK_IO_OP_SIGNAL 0 // let xen know we have work to do 12.85 +#define BLOCK_IO_OP_ATTACH_VBD 1 // attach a VBD to a given domain 12.86 + 12.87 + 12.88 +typedef struct _extent { 12.89 + u16 raw_device; 12.90 + ulong start_sector; 12.91 + ulong nr_sectors; 12.92 +} extent_t; 12.93 + 12.94 + 12.95 +typedef struct _vbd_attach { 12.96 + int domain; 12.97 + u16 mode; // read-only or read-write 12.98 + u16 device; // how this domain refers to this VBD 12.99 + int nr_extents; // number of extents in the VBD 12.100 + extent_t *extents; // pointer to /array/ of extents 12.101 +} vbd_attach_t; 12.102 + 12.103 + 12.104 +typedef struct block_io_op_st 12.105 +{ 12.106 + unsigned long cmd; 12.107 + union 12.108 + { 12.109 + long signal_val_unused; 12.110 + vbd_attach_t attach_info; 12.111 + } 12.112 + u; 12.113 +} block_io_op_t; 12.114 + 12.115 + 12.116 + 12.117 + 12.118 +#endif /* __HYP_IFS_VBD_H__ */
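The new block_io_op_t pairs a command word with a union payload: BLOCK_IO_OP_SIGNAL carries nothing (it just asks Xen to look at the request ring), while BLOCK_IO_OP_ATTACH_VBD carries a vbd_attach_t describing the extents to attach to a domain. A hedged sketch of a privileged guest building an attach request follows; the function name and the device/extent numbers are illustrative assumptions, the include paths are assumed from the xenolinux tree, the hypervisor-side handler is not part of this changeset, and HYPERVISOR_block_io_op is the wrapper whose signature is changed later in this patch.

/* Illustrative only: build a two-extent read/write VBD attach request for
 * a target domain, using the structures declared in this header. */
#include <asm/hypervisor-ifs/vbd.h>   /* assumed xenolinux include path */
#include <asm/hypervisor.h>           /* HYPERVISOR_block_io_op()       */

static int attach_example_vbd(int target_domain)
{
    static extent_t exts[2] = {
        { 0x0301, 0,    1024 },       /* raw_device, start_sector, nr_sectors */
        { 0x0302, 2048, 1024 },
    };
    block_io_op_t op;

    op.cmd                      = BLOCK_IO_OP_ATTACH_VBD;
    op.u.attach_info.domain     = target_domain;
    op.u.attach_info.mode       = XEN_VBD_RW;             /* read-write       */
    op.u.attach_info.device     = 1;                       /* guest-visible id */
    op.u.attach_info.nr_extents = 2;
    op.u.attach_info.extents    = exts;                    /* pointer to array */

    return HYPERVISOR_block_io_op(&op);
}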
13.1 --- a/xen/include/xeno/block.h Tue Oct 14 10:42:54 2003 +0000 13.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 13.3 @@ -1,11 +0,0 @@ 13.4 -/* block.h 13.5 - * 13.6 - * this is the hypervisor end of the block io code. 13.7 - */ 13.8 - 13.9 -#include <hypervisor-ifs/block.h> 13.10 - 13.11 -/* vif prototypes */ 13.12 -blk_ring_t *create_block_ring(int domain); 13.13 -void destroy_block_ring(struct task_struct *p); 13.14 -
14.1 --- a/xen/include/xeno/physdisk.h Tue Oct 14 10:42:54 2003 +0000 14.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 14.3 @@ -1,14 +0,0 @@ 14.4 -#ifndef PHYSDISK_ACES__ 14.5 -#define PHYSDISK_ACES__ 14.6 - 14.7 -struct task_struct; 14.8 - 14.9 -void destroy_physdisk_aces(struct task_struct *p); 14.10 - 14.11 -int xen_physdisk_grant(xp_disk_t *); 14.12 -int xen_physdisk_probe(struct task_struct *requesting_task, 14.13 - physdisk_probebuf_t *); 14.14 -int xen_physdisk_access_okay(phys_seg_t *pseg, struct task_struct *p, 14.15 - int operation); 14.16 - 14.17 -#endif /* PHYSDISK_ACES__ */
15.1 --- a/xen/include/xeno/sched.h Tue Oct 14 10:42:54 2003 +0000 15.2 +++ b/xen/include/xeno/sched.h Wed Oct 29 16:27:23 2003 +0000 15.3 @@ -61,8 +61,7 @@ extern struct mm_struct init_mm; 15.4 #define PF_CONSTRUCTED 0x8 /* Has the guest OS been fully built yet? */ 15.5 15.6 #include <xeno/vif.h> 15.7 -#include <xeno/block.h> 15.8 -#include <xeno/segment.h> 15.9 +#include <xeno/vbd.h> 15.10 15.11 /* SMH: replace below when have explicit 'priv' flag or bitmask */ 15.12 #define IS_PRIV(_p) ((_p)->domain == 0) 15.13 @@ -134,7 +133,7 @@ struct task_struct 15.14 the process can do raw access 15.15 to. */ 15.16 spinlock_t physdev_lock; 15.17 - segment_t *segment_list[XEN_MAX_SEGMENTS]; /* xvd */ 15.18 + vbd_t *vbd_list[XEN_MAX_VBDS]; /* vbds for this domain */ 15.19 15.20 /* VM */ 15.21 struct mm_struct mm;
16.1 --- a/xen/include/xeno/segment.h Tue Oct 14 10:42:54 2003 +0000 16.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 16.3 @@ -1,49 +0,0 @@ 16.4 -#ifndef __SEGMENT_H__ 16.5 -#define __SEGMENT_H__ 16.6 - 16.7 -#include <hypervisor-ifs/block.h> 16.8 -#include <hypervisor-ifs/segment.h> 16.9 - 16.10 -/* Describes a physical disk extent. */ 16.11 -typedef struct { 16.12 - unsigned short dev; 16.13 - unsigned short nr_sects; 16.14 - unsigned long sector_number; 16.15 - unsigned long buffer; 16.16 -} phys_seg_t; 16.17 - 16.18 -struct task_struct; 16.19 - 16.20 -void xen_segment_initialize(void); 16.21 -void xen_refresh_segment_list (struct task_struct *p); 16.22 -int xen_segment_create(xv_disk_t *xvd); 16.23 -int xen_segment_delete(struct task_struct *p, int segnr); 16.24 -int xen_segment_map_request( 16.25 - phys_seg_t *pseg, struct task_struct *p, int operation, 16.26 - unsigned short segment_number, 16.27 - unsigned long sect_nr, unsigned long buffer, unsigned short nr_sects); 16.28 - 16.29 -/* 16.30 - * virtual hard disks 16.31 - * 16.32 - * each segment is composed of a number of extents 16.33 - */ 16.34 - 16.35 -typedef struct extent 16.36 -{ 16.37 - int disk; /* A XEN_IDE_DEV or a XEN_SCSI_DEV */ 16.38 - unsigned long offset; /* offset into disk */ 16.39 - unsigned long size; /* size of this extent */ 16.40 -} extent_t; 16.41 - 16.42 -typedef struct segment 16.43 -{ 16.44 - int mode; /* UNUSED, RO, or RW */ 16.45 - int domain; 16.46 - int segment_number; /* segment number for domain */ 16.47 - char key[XEN_SEGMENT_KEYSIZE]; /* for the userspace tools in dom0 */ 16.48 - int num_extents; /* number of extents */ 16.49 - extent_t *extents; 16.50 -} segment_t; 16.51 - 16.52 -#endif
17.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 17.2 +++ b/xen/include/xeno/vbd.h Wed Oct 29 16:27:23 2003 +0000 17.3 @@ -0,0 +1,56 @@ 17.4 +/* 17.5 +** include/xeno/vbd.h: 17.6 +** -- xen internal declarations + prototypes for virtual block devices 17.7 +** 17.8 +*/ 17.9 +#ifndef __VBD_H__ 17.10 +#define __VBD_H__ 17.11 + 17.12 +#include <hypervisor-ifs/block.h> 17.13 +#include <hypervisor-ifs/vbd.h> 17.14 + 17.15 +/* Describes a physical disk extent. */ 17.16 +typedef struct { 17.17 + unsigned short dev; 17.18 + unsigned short nr_sects; 17.19 + unsigned long sector_number; 17.20 + unsigned long buffer; 17.21 +} phys_seg_t; 17.22 + 17.23 +struct task_struct; 17.24 + 17.25 +void xen_vbd_initialize(void); 17.26 +void xen_refresh_vbd_list (struct task_struct *p); 17.27 +int xen_vbd_create(xv_disk_t *xvd); 17.28 +int xen_vbd_delete(struct task_struct *p, int segnr); 17.29 +int xen_vbd_map_request( 17.30 + phys_seg_t *pseg, struct task_struct *p, int operation, 17.31 + unsigned short vbd_number, 17.32 + unsigned long sect_nr, unsigned long buffer, unsigned short nr_sects); 17.33 + 17.34 +typedef struct vbd 17.35 +{ 17.36 + int mode; /* UNUSED, RO, or RW */ 17.37 + int domain; 17.38 + int vbd_number; /* vbd number for domain */ 17.39 + char key[XEN_VBD_KEYSIZE]; /* for the userspace tools in dom0 */ 17.40 + int num_extents; /* number of extents */ 17.41 + extent_t *extents; 17.42 +} vbd_t; 17.43 + 17.44 +#endif 17.45 + 17.46 +#ifndef PHYSDISK_ACES__ 17.47 +#define PHYSDISK_ACES__ 17.48 + 17.49 +struct task_struct; 17.50 + 17.51 +void destroy_physdisk_aces(struct task_struct *p); 17.52 + 17.53 +int xen_physdisk_grant(xp_disk_t *); 17.54 +int xen_physdisk_probe(struct task_struct *requesting_task, 17.55 + physdisk_probebuf_t *); 17.56 +int xen_physdisk_access_okay(phys_seg_t *pseg, struct task_struct *p, 17.57 + int operation); 17.58 + 17.59 +#endif /* PHYSDISK_ACES__ */
18.1 --- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/Makefile Tue Oct 14 10:42:54 2003 +0000 18.2 +++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/Makefile Wed Oct 29 16:27:23 2003 +0000 18.3 @@ -1,3 +1,3 @@ 18.4 O_TARGET := blk.o 18.5 -obj-y := xl_block.o xl_ide.o xl_scsi.o xl_segment.o xl_segment_proc.o info.o 18.6 +obj-y := xl_block.o xl_ide.o xl_scsi.o xl_vbd.o info.o 18.7 include $(TOPDIR)/Rules.make
19.1 --- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c Tue Oct 14 10:42:54 2003 +0000 19.2 +++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c Wed Oct 29 16:27:23 2003 +0000 19.3 @@ -38,9 +38,14 @@ static unsigned long sg_next_sect; 19.4 19.5 static inline void signal_requests_to_xen(void) 19.6 { 19.7 + block_io_op_t op; 19.8 + 19.9 DISABLE_SCATTERGATHER(); 19.10 blk_ring->req_prod = req_prod; 19.11 - HYPERVISOR_block_io_op(); 19.12 + 19.13 + op.cmd = BLOCK_IO_OP_SIGNAL; 19.14 + HYPERVISOR_block_io_op(&op); 19.15 + return; 19.16 } 19.17 19.18 19.19 @@ -117,7 +122,7 @@ static inline struct gendisk *xldev_to_g 19.20 break; 19.21 19.22 case XLVIRT_MAJOR: 19.23 - gd = xlsegment_gendisk; 19.24 + gd = xlvbd_gendisk; 19.25 break; 19.26 } 19.27 19.28 @@ -194,8 +199,8 @@ int xenolinux_block_ioctl(struct inode * 19.29 19.30 case XLVIRT_MAJOR: 19.31 DPRINTK_IOCTL(" BLKSSZGET: %x 0x%x\n", BLKSSZGET, 19.32 - xlsegment_hwsect(MINOR(dev))); 19.33 - return xlsegment_hwsect(MINOR(dev)); 19.34 + xlsbd_hwsect(MINOR(dev))); 19.35 + return xlvbd_hwsect(MINOR(dev)); 19.36 19.37 default: 19.38 printk(KERN_ALERT "BLKSSZGET ioctl() on bogus disk!\n"); 19.39 @@ -289,7 +294,7 @@ int xenolinux_block_revalidate(kdev_t de 19.40 if ( xdi != NULL ) 19.41 { 19.42 memset(xdi, 0, sizeof(*xdi)); 19.43 - xenolinux_control_msg(XEN_BLOCK_PROBE_SEG, 19.44 + xenolinux_control_msg(XEN_BLOCK_PROBE, 19.45 (char *)xdi, sizeof(*xdi)); 19.46 for ( i = 0; i < xdi->count; i++ ) 19.47 if ( IS_VIRTUAL_XENDEV(xdi->disks[i].device) && 19.48 @@ -313,7 +318,7 @@ int xenolinux_block_revalidate(kdev_t de 19.49 * request block io 19.50 * 19.51 * id: for guest use only. 19.52 - * operation: XEN_BLOCK_{READ,WRITE,PROBE*,SEG*} 19.53 + * operation: XEN_BLOCK_{READ,WRITE,PROBE,VBD*} 19.54 * buffer: buffer to read/write into. this should be a 19.55 * virtual address in the guest os. 19.56 */ 19.57 @@ -335,13 +340,11 @@ static int hypervisor_request(unsigned l 19.58 19.59 switch ( operation ) 19.60 { 19.61 - case XEN_BLOCK_SEG_CREATE: 19.62 - case XEN_BLOCK_SEG_DELETE: 19.63 + case XEN_BLOCK_VBD_CREATE: 19.64 + case XEN_BLOCK_VBD_DELETE: 19.65 case XEN_BLOCK_PHYSDEV_GRANT: 19.66 case XEN_BLOCK_PHYSDEV_PROBE: 19.67 - case XEN_BLOCK_PROBE_BLK: 19.68 - case XEN_BLOCK_PROBE_SEG: 19.69 - case XEN_BLOCK_PROBE_SEG_ALL: 19.70 + case XEN_BLOCK_PROBE: 19.71 if ( RING_FULL ) return 1; 19.72 phys_device = (kdev_t) 0; 19.73 sector_number = 0; 19.74 @@ -507,11 +510,9 @@ static void xlblk_response_int(int irq, 19.75 } 19.76 break; 19.77 19.78 - case XEN_BLOCK_SEG_CREATE: 19.79 - case XEN_BLOCK_SEG_DELETE: 19.80 - case XEN_BLOCK_PROBE_SEG: 19.81 - case XEN_BLOCK_PROBE_SEG_ALL: 19.82 - case XEN_BLOCK_PROBE_BLK: 19.83 + case XEN_BLOCK_VBD_CREATE: 19.84 + case XEN_BLOCK_VBD_DELETE: 19.85 + case XEN_BLOCK_PROBE: 19.86 case XEN_BLOCK_PHYSDEV_GRANT: 19.87 case XEN_BLOCK_PHYSDEV_PROBE: 19.88 xlblk_control_msg_pending = bret->status; 19.89 @@ -570,7 +571,7 @@ int xenolinux_control_msg(int operation, 19.90 19.91 int __init xlblk_init(void) 19.92 { 19.93 - int error; 19.94 + int error; 19.95 19.96 xlblk_control_msg_pending = 0; 19.97 nr_pending = 0; 19.98 @@ -589,7 +590,7 @@ int __init xlblk_init(void) 19.99 19.100 /* Probe for disk information. 
*/ 19.101 memset(&xlblk_disk_info, 0, sizeof(xlblk_disk_info)); 19.102 - error = xenolinux_control_msg(XEN_BLOCK_PROBE_BLK, 19.103 + error = xenolinux_control_msg(XEN_BLOCK_PROBE, 19.104 (char *)&xlblk_disk_info, 19.105 sizeof(xen_disk_info_t)); 19.106 if ( error ) 19.107 @@ -599,10 +600,26 @@ int __init xlblk_init(void) 19.108 goto fail; 19.109 } 19.110 19.111 + { 19.112 + int i; 19.113 + printk(KERN_ALERT "xlblk_init: xen returned info for %d disks\n", 19.114 + xlblk_disk_info.count); 19.115 + for(i=0; i < xlblk_disk_info.count; i++) { 19.116 + printk("%d -- device no=%x, type=%d, capacity=%ldMB\n", 19.117 + i, xlblk_disk_info.disks[i].device, 19.118 + xlblk_disk_info.disks[i].type, 19.119 + xlblk_disk_info.disks[i].capacity >> 11); 19.120 + 19.121 + } 19.122 + 19.123 + } 19.124 /* Pass the information to our fake IDE and SCSI susbystems. */ 19.125 xlide_init(&xlblk_disk_info); 19.126 xlscsi_init(&xlblk_disk_info); 19.127 19.128 + /* And do the same for the 'virtual block device' world */ 19.129 + xlvbd_init(&xlblk_disk_info); 19.130 + 19.131 return 0; 19.132 19.133 fail: 19.134 @@ -611,8 +628,9 @@ int __init xlblk_init(void) 19.135 19.136 static void __exit xlblk_cleanup(void) 19.137 { 19.138 + xlvbd_cleanup(); 19.139 + xlscsi_cleanup(); 19.140 xlide_cleanup(); 19.141 - xlscsi_cleanup(); 19.142 free_irq(XLBLK_RESPONSE_IRQ, NULL); 19.143 } 19.144
20.1 --- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.h Tue Oct 14 10:42:54 2003 +0000 20.2 +++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.h Wed Oct 29 16:27:23 2003 +0000 20.3 @@ -21,8 +21,8 @@ 20.4 #include <linux/blkdev.h> 20.5 #include <linux/major.h> 20.6 20.7 -#include <asm/hypervisor-ifs/block.h> 20.8 #include <asm/hypervisor-ifs/hypervisor-if.h> 20.9 +#include <asm/hypervisor-ifs/vbd.h> 20.10 #include <asm/io.h> 20.11 #include <asm/atomic.h> 20.12 #include <asm/uaccess.h> 20.13 @@ -101,8 +101,10 @@ extern void xlscsi_cleanup(void); 20.14 extern struct gendisk *xlscsi_gendisk; 20.15 20.16 /* Virtual block-device subsystem. */ 20.17 -extern int xlsegment_hwsect(int minor); 20.18 -extern struct gendisk *xlsegment_gendisk; 20.19 +extern int xlvbd_init(xen_disk_info_t *xdi); 20.20 +extern int xlvbd_hwsect(int minor); 20.21 +extern void xlvbd_cleanup(void); 20.22 +extern struct gendisk *xlvbd_gendisk; 20.23 20.24 extern unsigned short xldev_to_physdev(kdev_t xldev); 20.25 extern kdev_t physdev_to_xldev(unsigned short physdev);
21.1 --- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_segment.c Tue Oct 14 10:42:54 2003 +0000 21.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 21.3 @@ -1,182 +0,0 @@ 21.4 -/****************************************************************************** 21.5 - * xl_segment.c 21.6 - * 21.7 - * Xenolinux virtual block-device driver (xvd). 21.8 - * 21.9 - */ 21.10 - 21.11 -#include "xl_block.h" 21.12 - 21.13 -#define MAJOR_NR XLVIRT_MAJOR 21.14 -#include <linux/blk.h> 21.15 - 21.16 -/* Copied from linux/ide.h */ 21.17 -typedef unsigned char byte; 21.18 - 21.19 -#define XLVIRT_MAX 256 21.20 -#define XLVIRT_MAJOR_NAME "xvd" 21.21 -static int xlseg_blksize_size[XLVIRT_MAX]; 21.22 -static int xlseg_hardsect_size[XLVIRT_MAX]; 21.23 -static int xlseg_max_sectors[XLVIRT_MAX]; 21.24 - 21.25 -struct gendisk *xlsegment_gendisk = NULL; 21.26 - 21.27 -static xen_disk_info_t xlseg_disk_info; 21.28 - 21.29 -static struct block_device_operations xlsegment_block_fops = 21.30 -{ 21.31 - open: xenolinux_block_open, 21.32 - release: xenolinux_block_release, 21.33 - ioctl: xenolinux_block_ioctl, 21.34 - check_media_change: xenolinux_block_check, 21.35 - revalidate: xenolinux_block_revalidate, 21.36 -}; 21.37 - 21.38 - 21.39 -int xlsegment_hwsect(int minor) 21.40 -{ 21.41 - return xlseg_hardsect_size[minor]; 21.42 -} 21.43 - 21.44 - 21.45 -int __init xlseg_init(void) 21.46 -{ 21.47 - int i, result, units, minors, disk; 21.48 - xen_disk_info_t *xdi = &xlseg_disk_info; 21.49 - struct gendisk *gd; 21.50 - 21.51 - SET_MODULE_OWNER(&xlsegment_block_fops); 21.52 - 21.53 - /* Probe for disk information. */ 21.54 - memset(xdi, 0, sizeof(*xdi)); 21.55 - xenolinux_control_msg(XEN_BLOCK_PROBE_SEG, (char *)xdi, sizeof(*xdi)); 21.56 - 21.57 - DPRINTK("xvd block device probe:\n"); 21.58 - for ( i = 0; i < xdi->count; i++ ) 21.59 - { 21.60 - DPRINTK(" %2d: device: %d, capacity: %ld\n", 21.61 - i, xdi->disks[i].device, xdi->disks[i].capacity); 21.62 - } 21.63 - 21.64 - result = register_blkdev(XLVIRT_MAJOR, XLVIRT_MAJOR_NAME, 21.65 - &xlsegment_block_fops); 21.66 - if ( result < 0 ) 21.67 - { 21.68 - printk(KERN_ALERT "XL Segment: can't get major %d\n", XLVIRT_MAJOR); 21.69 - return result; 21.70 - } 21.71 - 21.72 - /* Initialize global arrays. */ 21.73 - for (i = 0; i < XLVIRT_MAX; i++) 21.74 - { 21.75 - xlseg_blksize_size[i] = 512; 21.76 - xlseg_hardsect_size[i] = 512; 21.77 - xlseg_max_sectors[i] = 128; 21.78 - } 21.79 - 21.80 - blk_size[XLVIRT_MAJOR] = NULL; 21.81 - blksize_size[XLVIRT_MAJOR] = xlseg_blksize_size; 21.82 - hardsect_size[XLVIRT_MAJOR] = xlseg_hardsect_size; 21.83 - max_sectors[XLVIRT_MAJOR] = xlseg_max_sectors; 21.84 - read_ahead[XLVIRT_MAJOR] = 8; 21.85 - 21.86 - blk_init_queue(BLK_DEFAULT_QUEUE(XLVIRT_MAJOR), do_xlblk_request); 21.87 - 21.88 - /* 21.89 - * Turn off barking 'headactive' mode. We dequeue buffer heads as 21.90 - * soon as we pass them down to Xen. 21.91 - */ 21.92 - blk_queue_headactive(BLK_DEFAULT_QUEUE(XLVIRT_MAJOR), 0); 21.93 - 21.94 - units = XLVIRT_MAX >> XLVIRT_PARTN_SHIFT; 21.95 - 21.96 - /* Construct an appropriate gendisk structure. 
*/ 21.97 - minors = units * (1<<XLVIRT_PARTN_SHIFT); 21.98 - gd = kmalloc(sizeof(struct gendisk), GFP_KERNEL); 21.99 - gd->sizes = kmalloc(minors * sizeof(int), GFP_KERNEL); 21.100 - gd->part = kmalloc(minors * sizeof(struct hd_struct), GFP_KERNEL); 21.101 - gd->major = XLVIRT_MAJOR; 21.102 - gd->major_name = XLVIRT_MAJOR_NAME; 21.103 - gd->minor_shift = XLVIRT_PARTN_SHIFT; 21.104 - gd->max_p = 1<<XLVIRT_PARTN_SHIFT; 21.105 - gd->nr_real = units; 21.106 - gd->real_devices = kmalloc(units * sizeof(xl_disk_t), GFP_KERNEL); 21.107 - gd->next = NULL; 21.108 - gd->fops = &xlsegment_block_fops; 21.109 - gd->de_arr = kmalloc(sizeof(*gd->de_arr) * units, GFP_KERNEL); 21.110 - gd->flags = kmalloc(sizeof(*gd->flags) * units, GFP_KERNEL); 21.111 - memset(gd->sizes, 0, minors * sizeof(int)); 21.112 - memset(gd->part, 0, minors * sizeof(struct hd_struct)); 21.113 - memset(gd->de_arr, 0, sizeof(*gd->de_arr) * units); 21.114 - memset(gd->flags, 0, sizeof(*gd->flags) * units); 21.115 - memset(gd->real_devices, 0, sizeof(xl_disk_t) * units); 21.116 - xlsegment_gendisk = gd; 21.117 - add_gendisk(gd); 21.118 - 21.119 - /* Now register each disk in turn. */ 21.120 - for ( i = 0; i < xdi->count; i++ ) 21.121 - { 21.122 - disk = xdi->disks[i].device & XENDEV_IDX_MASK; 21.123 - 21.124 - if ( !IS_VIRTUAL_XENDEV(xdi->disks[i].device) || 21.125 - (disk >= XLVIRT_DEVS_PER_MAJOR) ) 21.126 - continue; 21.127 - 21.128 - ((xl_disk_t *)gd->real_devices)[disk].capacity = 21.129 - xdi->disks[i].capacity; 21.130 - register_disk(gd, 21.131 - MKDEV(XLVIRT_MAJOR, disk<<XLVIRT_PARTN_SHIFT), 21.132 - 1<<XLVIRT_PARTN_SHIFT, 21.133 - &xlsegment_block_fops, 21.134 - xdi->disks[i].capacity); 21.135 - } 21.136 - 21.137 - printk(KERN_ALERT 21.138 - "XenoLinux Virtual Segment Device Driver installed [device: %d]\n", 21.139 - XLVIRT_MAJOR); 21.140 - 21.141 - return 0; 21.142 -} 21.143 - 21.144 - 21.145 -static void __exit xlseg_cleanup(void) 21.146 -{ 21.147 - if ( xlsegment_gendisk == NULL ) return; 21.148 - 21.149 - blk_cleanup_queue(BLK_DEFAULT_QUEUE(XLVIRT_MAJOR)); 21.150 - 21.151 - xlsegment_gendisk = NULL; 21.152 - 21.153 - read_ahead[XLVIRT_MAJOR] = 0; 21.154 - 21.155 - if ( blksize_size[XLVIRT_MAJOR] != NULL ) 21.156 - { 21.157 - kfree(blksize_size[XLVIRT_MAJOR]); 21.158 - blksize_size[XLVIRT_MAJOR] = NULL; 21.159 - } 21.160 - 21.161 - if ( hardsect_size[XLVIRT_MAJOR] != NULL ) 21.162 - { 21.163 - kfree(hardsect_size[XLVIRT_MAJOR]); 21.164 - hardsect_size[XLVIRT_MAJOR] = NULL; 21.165 - } 21.166 - 21.167 - if ( max_sectors[XLVIRT_MAJOR] != NULL ) 21.168 - { 21.169 - kfree(max_sectors[XLVIRT_MAJOR]); 21.170 - max_sectors[XLVIRT_MAJOR] = NULL; 21.171 - } 21.172 - 21.173 - if ( unregister_blkdev(XLVIRT_MAJOR, XLVIRT_MAJOR_NAME) != 0 ) 21.174 - { 21.175 - printk(KERN_ALERT 21.176 - "XenoLinux Virtual Segment Device Driver" 21.177 - " uninstalled w/ errs\n"); 21.178 - } 21.179 -} 21.180 - 21.181 - 21.182 -#ifdef MODULE 21.183 -module_init(xlseg_init); 21.184 -module_exit(xlseg_cleanup); 21.185 -#endif
22.1 --- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_segment_proc.c Tue Oct 14 10:42:54 2003 +0000 22.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 22.3 @@ -1,337 +0,0 @@ 22.4 -/* 22.5 - * xl_segment_proc.c 22.6 - * 22.7 - * XenoLinux virtual disk proc interface . 22.8 - */ 22.9 - 22.10 -#include "xl_block.h" 22.11 -#include <asm/xeno_proc.h> 22.12 -#include <linux/delay.h> 22.13 -#include <linux/seq_file.h> 22.14 -#include <asm/hypervisor-ifs/segment.h> 22.15 - 22.16 -static struct proc_dir_entry *vhd; 22.17 - 22.18 -static void *proc_vhd_next(struct seq_file *s, void *v, loff_t *pos) 22.19 -{ 22.20 - xen_segment_info_t *data; 22.21 - 22.22 - if ( pos != NULL ) 22.23 - ++(*pos); 22.24 - 22.25 - data = v; 22.26 - return data->count-- ? NULL : v; 22.27 -} 22.28 - 22.29 -static void *proc_vhd_start(struct seq_file *s, loff_t *ppos) 22.30 -{ 22.31 - loff_t pos = *ppos; 22.32 - xen_segment_info_t *data; 22.33 - 22.34 - data = kmalloc(sizeof(*data), GFP_KERNEL); 22.35 - xenolinux_control_msg(XEN_BLOCK_PROBE_SEG_ALL, (char *)data, sizeof(*data)); 22.36 - data->count -= pos; 22.37 - 22.38 - if (data->count > 0) 22.39 - return data; 22.40 - 22.41 - kfree(data); 22.42 - return NULL; 22.43 -} 22.44 - 22.45 -static int proc_vhd_show(struct seq_file *s, void *v) 22.46 -{ 22.47 - xen_segment_info_t *data = v; 22.48 - 22.49 - seq_printf (s, 22.50 - "%x %x %10.10s %x\n", 22.51 - data->segments[data->count - 1].domain, 22.52 - data->segments[data->count - 1].seg_nr, 22.53 - data->segments[data->count - 1].key, 22.54 - data->segments[data->count - 1].mode); 22.55 - 22.56 - return 0; 22.57 -} 22.58 - 22.59 -static void proc_vhd_stop(struct seq_file *s, void *v) 22.60 -{ 22.61 - kfree(v); 22.62 -} 22.63 - 22.64 -static struct seq_operations proc_vhd_op = { 22.65 - .start = proc_vhd_start, 22.66 - .next = proc_vhd_next, 22.67 - .show = proc_vhd_show, 22.68 - .stop = proc_vhd_stop 22.69 -}; 22.70 - 22.71 -static int proc_open_vhd(struct inode *inode, struct file *file) 22.72 -{ 22.73 - return seq_open(file, &proc_vhd_op); 22.74 -} 22.75 - 22.76 - 22.77 -#define isdelim(c) \ 22.78 - (c==' '||c==','||c=='\n'||c=='\r'||c=='\t'||c==':'||c=='('||c==')' ? 1 : 0) 22.79 - 22.80 -char *get_string(char *string) /* a bit like strtok */ 22.81 -{ 22.82 - static char *temp; 22.83 - int loop = 0; 22.84 - 22.85 - if (string != NULL) 22.86 - temp = string; 22.87 - else 22.88 - string = temp; 22.89 - 22.90 - try_again: 22.91 - 22.92 - while (!isdelim(string[loop])) 22.93 - { 22.94 - if (string[loop] == '\0') 22.95 - return NULL; 22.96 - loop++; 22.97 - } 22.98 - 22.99 - string[loop] = '\0'; 22.100 - temp = (string + loop + 1); 22.101 - 22.102 - if (loop == 0) 22.103 - { 22.104 - string = temp; 22.105 - goto try_again; 22.106 - } 22.107 - 22.108 - return string; 22.109 -} 22.110 - 22.111 - 22.112 -#define isdigit(c) (c >= '0' && c <= '9' ? 
1 : 0) 22.113 -unsigned long to_number(char *string) /* atoi */ 22.114 -{ 22.115 - unsigned long value = 0; 22.116 - 22.117 - if (string == NULL) return 0; 22.118 - 22.119 - while (!isdigit(*string) && *string != '\0') string++; 22.120 - 22.121 - while (isdigit(*string)) 22.122 - { 22.123 - value = value * 10 + (*string - '0'); 22.124 - string++; 22.125 - } 22.126 - 22.127 - return value; 22.128 -} 22.129 - 22.130 -static int proc_write_vhd(struct file *file, const char *buffer, 22.131 - size_t count, loff_t *offp) 22.132 -{ 22.133 - char *local = kmalloc((count + 1) * sizeof(char), GFP_KERNEL); 22.134 - char *string; 22.135 - int loop; 22.136 - xv_disk_t xvd; 22.137 - int res; 22.138 - 22.139 - if( !(start_info.flags & SIF_PRIVILEGED) ) 22.140 - return -EPERM; 22.141 - 22.142 - if (!local) 22.143 - return -ENOMEM; 22.144 - 22.145 - memset (&xvd, 0, sizeof(xvd)); 22.146 - 22.147 - if (copy_from_user(local, buffer, count)) 22.148 - { 22.149 - res = -EFAULT; 22.150 - goto out; 22.151 - } 22.152 - local[count] = '\0'; 22.153 - 22.154 - res = count; 22.155 - string = get_string(local); /* domain specifier */ 22.156 - if (string == NULL) 22.157 - { 22.158 - goto out; 22.159 - } 22.160 - if (*string != 'd' && *string != 'D') 22.161 - { 22.162 - printk (KERN_ALERT 22.163 - "error: domain specifier missing [%s]. should be \"domain\".\n", 22.164 - string); 22.165 - goto out; 22.166 - } 22.167 - 22.168 - string = get_string(NULL); /* domain number */ 22.169 - if (string == NULL) 22.170 - { 22.171 - printk (KERN_ALERT "error: domain number missing\n"); 22.172 - goto out; 22.173 - } 22.174 - xvd.domain = (int) to_number(string); 22.175 - 22.176 - string = get_string(NULL); 22.177 - if (string && (strcmp(string, "RO") == 0 || strcmp(string, "ro") == 0)) 22.178 - { 22.179 - xvd.mode = XEN_DISK_READ_ONLY; 22.180 - } 22.181 - else if (string && (strcmp(string, "RW") == 0 || strcmp(string, "rw") == 0)) 22.182 - { 22.183 - xvd.mode = XEN_DISK_READ_WRITE; 22.184 - } 22.185 - else 22.186 - { 22.187 - printk (KERN_ALERT 22.188 - "error: bad mode [%s]. should be \"rw\" or \"ro\".\n", 22.189 - string); 22.190 - goto out; 22.191 - } 22.192 - 22.193 - string = get_string(NULL); /* look for Segment */ 22.194 - if (string == NULL || (*string != 's' && *string != 'S')) 22.195 - { 22.196 - printk (KERN_ALERT 22.197 - "error: segment specifier missing [%s]. should be \"segment\".\n", 22.198 - string); 22.199 - goto out; 22.200 - } 22.201 - 22.202 - string = get_string(NULL); /* segment number */ 22.203 - if (string == NULL) 22.204 - { 22.205 - printk (KERN_ALERT "error: segment number missing\n"); 22.206 - goto out; 22.207 - } 22.208 - xvd.segment = (int) to_number(string); 22.209 - 22.210 - string = get_string(NULL); /* look for key */ 22.211 - if (string == NULL || (*string != 'k' && *string != 'K')) 22.212 - { 22.213 - printk (KERN_ALERT 22.214 - "error: key specifier missing [%s]. should be \"key\".\n", 22.215 - string); 22.216 - goto out; 22.217 - } 22.218 - string = get_string(NULL); 22.219 - if (string == NULL || strlen(string) != XEN_SEGMENT_KEYSIZE) 22.220 - { 22.221 - printk (KERN_ALERT "error: key missing\n"); 22.222 - goto out; 22.223 - } 22.224 - memcpy(xvd.key, string, XEN_SEGMENT_KEYSIZE); 22.225 - 22.226 - string = get_string(NULL); /* look for Extents */ 22.227 - if (string == NULL || (*string != 'e' && *string != 'E')) 22.228 - { 22.229 - printk (KERN_ALERT 22.230 - "error: extents specifier missing [%s]. 
should be \"extents\".\n", 22.231 - string); 22.232 - goto out; 22.233 - } 22.234 - 22.235 - string = get_string(NULL); /* number of extents */ 22.236 - if (string == NULL) 22.237 - { 22.238 - printk (KERN_ALERT "error: number of extents missing\n"); 22.239 - goto out; 22.240 - } 22.241 - xvd.ext_count = (int) to_number(string); 22.242 - 22.243 - /* ignore parenthesis */ 22.244 - 22.245 - for (loop = 0; loop < xvd.ext_count; loop++) 22.246 - { 22.247 - string = get_string(NULL); /* look for Disk */ 22.248 - if (string == NULL || (*string != 'd' && *string != 'D')) 22.249 - { 22.250 - printk (KERN_ALERT 22.251 - "hmm, extent disk specifier missing [%s]. should be \"disk\".\n", 22.252 - string); 22.253 - goto out; 22.254 - } 22.255 - string = get_string(NULL); /* disk number */ 22.256 - if (string == NULL) 22.257 - { 22.258 - printk (KERN_ALERT "error: disk number missing\n"); 22.259 - goto out; 22.260 - } 22.261 - xvd.extents[loop].disk = xldev_to_physdev((int) to_number(string)); 22.262 - 22.263 - string = get_string(NULL); /* look for Offset */ 22.264 - if (string == NULL || (*string != 'o' && *string != 'O')) 22.265 - { 22.266 - printk (KERN_ALERT 22.267 - "error: disk offset missing [%s]. should be \"offset\".\n", 22.268 - string); 22.269 - goto out; 22.270 - } 22.271 - string = get_string(NULL); /* offset */ 22.272 - if (string == NULL) 22.273 - { 22.274 - printk (KERN_ALERT "error: offset missing\n"); 22.275 - goto out; 22.276 - } 22.277 - xvd.extents[loop].offset = to_number(string); 22.278 - 22.279 - string = get_string(NULL); /* look for Size */ 22.280 - if (string == NULL || (*string != 's' && *string != 'S')) 22.281 - { 22.282 - printk (KERN_ALERT 22.283 - "error: extent size missing [%s]. should be \"size\".\n", 22.284 - string); 22.285 - goto out; 22.286 - } 22.287 - string = get_string(NULL); /* size */ 22.288 - if (string == NULL) 22.289 - { 22.290 - printk (KERN_ALERT "error: extent size missing\n"); 22.291 - goto out; 22.292 - } 22.293 - xvd.extents[loop].size = to_number(string); 22.294 - } 22.295 - 22.296 - xenolinux_control_msg(XEN_BLOCK_SEG_CREATE, (char *)&xvd, sizeof(xvd)); 22.297 - 22.298 - out: 22.299 - kfree(local); 22.300 - 22.301 - return res; 22.302 -} 22.303 - 22.304 -static struct file_operations proc_vhd_operations = { 22.305 - open: proc_open_vhd, 22.306 - read: seq_read, 22.307 - llseek: seq_lseek, 22.308 - release: seq_release, 22.309 - write: proc_write_vhd 22.310 -}; 22.311 - 22.312 -/******************************************************************/ 22.313 - 22.314 -int __init xlseg_proc_init(void) 22.315 -{ 22.316 - if ( !(start_info.flags & SIF_PRIVILEGED) ) 22.317 - return 0; 22.318 - 22.319 - vhd = create_xeno_proc_entry("vhd", 0600); 22.320 - if ( vhd == NULL ) 22.321 - panic ("xlseg_init: unable to create vhd proc entry\n"); 22.322 - 22.323 - vhd->data = NULL; 22.324 - vhd->proc_fops = &proc_vhd_operations; 22.325 - vhd->owner = THIS_MODULE; 22.326 - 22.327 - return 0; 22.328 -} 22.329 - 22.330 -static void __exit xlseg_proc_cleanup(void) 22.331 -{ 22.332 - if ( vhd == NULL ) return; 22.333 - remove_xeno_proc_entry("vhd"); 22.334 - vhd = NULL; 22.335 -} 22.336 - 22.337 -#ifdef MODULE 22.338 -module_init(xlseg_proc_init); 22.339 -module_exit(xlseg_proc_cleanup); 22.340 -#endif
23.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 23.2 +++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_vbd.c Wed Oct 29 16:27:23 2003 +0000 23.3 @@ -0,0 +1,168 @@ 23.4 +/****************************************************************************** 23.5 + * xl_vbd.c 23.6 + * 23.7 + * Xenolinux virtual block-device driver (xvd). 23.8 + * 23.9 + */ 23.10 + 23.11 +#include "xl_block.h" 23.12 + 23.13 +#define MAJOR_NR XLVIRT_MAJOR 23.14 +#include <linux/blk.h> 23.15 + 23.16 +/* Copied from linux/ide.h */ 23.17 +typedef unsigned char byte; 23.18 + 23.19 +#define XLVIRT_MAX 256 23.20 +#define XLVIRT_MAJOR_NAME "xvd" 23.21 +static int xlvbd_blksize_size[XLVIRT_MAX]; 23.22 +static int xlvbd_hardsect_size[XLVIRT_MAX]; 23.23 +static int xlvbd_max_sectors[XLVIRT_MAX]; 23.24 + 23.25 +struct gendisk *xlvbd_gendisk = NULL; 23.26 + 23.27 +static struct block_device_operations xlvbd_block_fops = 23.28 +{ 23.29 + open: xenolinux_block_open, 23.30 + release: xenolinux_block_release, 23.31 + ioctl: xenolinux_block_ioctl, 23.32 + check_media_change: xenolinux_block_check, 23.33 + revalidate: xenolinux_block_revalidate, 23.34 +}; 23.35 + 23.36 + 23.37 +int xlvbd_hwsect(int minor) 23.38 +{ 23.39 + return xlvbd_hardsect_size[minor]; 23.40 +} 23.41 + 23.42 + 23.43 +int __init xlvbd_init(xen_disk_info_t *xdi) 23.44 +{ 23.45 + int i, result, units, minors, disk; 23.46 + struct gendisk *gd; 23.47 + 23.48 + SET_MODULE_OWNER(&xlvbd_block_fops); 23.49 + 23.50 + result = register_blkdev(XLVIRT_MAJOR, XLVIRT_MAJOR_NAME, 23.51 + &xlvbd_block_fops); 23.52 + if ( result < 0 ) 23.53 + { 23.54 + printk(KERN_ALERT "XL VBD: can't get major %d\n", XLVIRT_MAJOR); 23.55 + return result; 23.56 + } 23.57 + 23.58 + /* Initialize global arrays. */ 23.59 + for (i = 0; i < XLVIRT_MAX; i++) 23.60 + { 23.61 + xlvbd_blksize_size[i] = 512; 23.62 + xlvbd_hardsect_size[i] = 512; 23.63 + xlvbd_max_sectors[i] = 128; 23.64 + } 23.65 + 23.66 + blk_size[XLVIRT_MAJOR] = NULL; 23.67 + blksize_size[XLVIRT_MAJOR] = xlvbd_blksize_size; 23.68 + hardsect_size[XLVIRT_MAJOR] = xlvbd_hardsect_size; 23.69 + max_sectors[XLVIRT_MAJOR] = xlvbd_max_sectors; 23.70 + read_ahead[XLVIRT_MAJOR] = 8; 23.71 + 23.72 + blk_init_queue(BLK_DEFAULT_QUEUE(XLVIRT_MAJOR), do_xlblk_request); 23.73 + 23.74 + /* 23.75 + * Turn off barking 'headactive' mode. We dequeue buffer heads as 23.76 + * soon as we pass them down to Xen. 23.77 + */ 23.78 + blk_queue_headactive(BLK_DEFAULT_QUEUE(XLVIRT_MAJOR), 0); 23.79 + 23.80 + units = XLVIRT_MAX >> XLVIRT_PARTN_SHIFT; 23.81 + 23.82 + /* Construct an appropriate gendisk structure. 
*/ 23.83 + minors = units * (1<<XLVIRT_PARTN_SHIFT); 23.84 + gd = kmalloc(sizeof(struct gendisk), GFP_KERNEL); 23.85 + gd->sizes = kmalloc(minors * sizeof(int), GFP_KERNEL); 23.86 + gd->part = kmalloc(minors * sizeof(struct hd_struct), GFP_KERNEL); 23.87 + gd->major = XLVIRT_MAJOR; 23.88 + gd->major_name = XLVIRT_MAJOR_NAME; 23.89 + gd->minor_shift = XLVIRT_PARTN_SHIFT; 23.90 + gd->max_p = 1<<XLVIRT_PARTN_SHIFT; 23.91 + gd->nr_real = units; 23.92 + gd->real_devices = kmalloc(units * sizeof(xl_disk_t), GFP_KERNEL); 23.93 + gd->next = NULL; 23.94 + gd->fops = &xlvbd_block_fops; 23.95 + gd->de_arr = kmalloc(sizeof(*gd->de_arr) * units, GFP_KERNEL); 23.96 + gd->flags = kmalloc(sizeof(*gd->flags) * units, GFP_KERNEL); 23.97 + memset(gd->sizes, 0, minors * sizeof(int)); 23.98 + memset(gd->part, 0, minors * sizeof(struct hd_struct)); 23.99 + memset(gd->de_arr, 0, sizeof(*gd->de_arr) * units); 23.100 + memset(gd->flags, 0, sizeof(*gd->flags) * units); 23.101 + memset(gd->real_devices, 0, sizeof(xl_disk_t) * units); 23.102 + xlvbd_gendisk = gd; 23.103 + add_gendisk(gd); 23.104 + 23.105 + /* Now register each disk in turn. */ 23.106 + for ( i = 0; i < xdi->count; i++ ) 23.107 + { 23.108 + disk = xdi->disks[i].device & XENDEV_IDX_MASK; 23.109 + 23.110 + if ( !IS_VIRTUAL_XENDEV(xdi->disks[i].device) || 23.111 + (disk >= XLVIRT_DEVS_PER_MAJOR) ) 23.112 + continue; 23.113 + 23.114 + ((xl_disk_t *)gd->real_devices)[disk].capacity = 23.115 + xdi->disks[i].capacity; 23.116 + register_disk(gd, 23.117 + MKDEV(XLVIRT_MAJOR, disk<<XLVIRT_PARTN_SHIFT), 23.118 + 1<<XLVIRT_PARTN_SHIFT, 23.119 + &xlvbd_block_fops, 23.120 + xdi->disks[i].capacity); 23.121 + } 23.122 + 23.123 + printk(KERN_ALERT 23.124 + "XenoLinux Virtual Block Device Driver installed [device: %d]\n", 23.125 + XLVIRT_MAJOR); 23.126 + 23.127 + return 0; 23.128 +} 23.129 + 23.130 + 23.131 +void xlvbd_cleanup(void) 23.132 +{ 23.133 + if ( xlvbd_gendisk == NULL ) return; 23.134 + 23.135 + blk_cleanup_queue(BLK_DEFAULT_QUEUE(XLVIRT_MAJOR)); 23.136 + 23.137 + xlvbd_gendisk = NULL; 23.138 + 23.139 + read_ahead[XLVIRT_MAJOR] = 0; 23.140 + 23.141 + if ( blksize_size[XLVIRT_MAJOR] != NULL ) 23.142 + { 23.143 + kfree(blksize_size[XLVIRT_MAJOR]); 23.144 + blksize_size[XLVIRT_MAJOR] = NULL; 23.145 + } 23.146 + 23.147 + if ( hardsect_size[XLVIRT_MAJOR] != NULL ) 23.148 + { 23.149 + kfree(hardsect_size[XLVIRT_MAJOR]); 23.150 + hardsect_size[XLVIRT_MAJOR] = NULL; 23.151 + } 23.152 + 23.153 + if ( max_sectors[XLVIRT_MAJOR] != NULL ) 23.154 + { 23.155 + kfree(max_sectors[XLVIRT_MAJOR]); 23.156 + max_sectors[XLVIRT_MAJOR] = NULL; 23.157 + } 23.158 + 23.159 + if ( unregister_blkdev(XLVIRT_MAJOR, XLVIRT_MAJOR_NAME) != 0 ) 23.160 + { 23.161 + printk(KERN_ALERT 23.162 + "XenoLinux Virtual Block Device Driver" 23.163 + " uninstalled w/ errs\n"); 23.164 + } 23.165 +} 23.166 + 23.167 + 23.168 +#ifdef MODULE 23.169 +module_init(xlvbd_init); 23.170 +module_exit(xlvbd_cleanup); 23.171 +#endif
24.1 --- a/xenolinux-2.4.22-sparse/drivers/block/ll_rw_blk.c Tue Oct 14 10:42:54 2003 +0000 24.2 +++ b/xenolinux-2.4.22-sparse/drivers/block/ll_rw_blk.c Wed Oct 29 16:27:23 2003 +0000 24.3 @@ -1598,8 +1598,6 @@ int __init blk_dev_init(void) 24.4 24.5 #ifdef CONFIG_XENOLINUX_BLOCK 24.6 xlblk_init(); 24.7 - xlseg_init(); 24.8 - xlseg_proc_init(); 24.9 #endif 24.10 24.11 return 0;
25.1 --- a/xenolinux-2.4.22-sparse/fs/partitions/xeno.c Tue Oct 14 10:42:54 2003 +0000 25.2 +++ b/xenolinux-2.4.22-sparse/fs/partitions/xeno.c Wed Oct 29 16:27:23 2003 +0000 25.3 @@ -5,7 +5,7 @@ 25.4 #include <linux/blk.h> 25.5 #include <linux/slab.h> 25.6 #include <linux/genhd.h> 25.7 -#include <asm/hypervisor-ifs/block.h> 25.8 +#include <asm/hypervisor-ifs/vbd.h> 25.9 #include <linux/pagemap.h> 25.10 25.11 #include "check.h"
26.1 --- a/xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h Tue Oct 14 10:42:54 2003 +0000 26.2 +++ b/xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h Wed Oct 29 16:27:23 2003 +0000 26.3 @@ -281,12 +281,13 @@ static inline int HYPERVISOR_network_op( 26.4 return ret; 26.5 } 26.6 26.7 -static inline int HYPERVISOR_block_io_op(void) 26.8 +static inline int HYPERVISOR_block_io_op(void *block_io_op) 26.9 { 26.10 int ret; 26.11 __asm__ __volatile__ ( 26.12 TRAP_INSTR 26.13 - : "=a" (ret) : "0" (__HYPERVISOR_block_io_op) ); 26.14 + : "=a" (ret) : "0" (__HYPERVISOR_block_io_op), 26.15 + "b" (block_io_op) ); 26.16 26.17 return ret; 26.18 }
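HYPERVISOR_block_io_op now takes a single pointer argument, passed in EBX via the "b" constraint next to the hypercall number in EAX; callers are expected to point it at a block_io_op_t whose cmd field selects the operation. A minimal usage sketch, mirroring signal_requests_to_xen() in xl_block.c earlier in this changeset (function name and include paths are assumptions):

#include <asm/hypervisor-ifs/vbd.h>   /* block_io_op_t, BLOCK_IO_OP_SIGNAL */
#include <asm/hypervisor.h>

static inline int kick_block_ring(void)
{
    block_io_op_t op;

    op.cmd = BLOCK_IO_OP_SIGNAL;          /* no payload for a plain signal */
    return HYPERVISOR_block_io_op(&op);   /* pointer travels in EBX        */
}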
27.1 --- a/xenolinux-2.4.22-sparse/include/linux/blk.h Tue Oct 14 10:42:54 2003 +0000 27.2 +++ b/xenolinux-2.4.22-sparse/include/linux/blk.h Wed Oct 29 16:27:23 2003 +0000 27.3 @@ -57,8 +57,6 @@ extern int tapeblock_init(void); 27.4 27.5 #if defined(CONFIG_XENOLINUX_BLOCK) 27.6 extern int xlblk_init(void); 27.7 -extern int xlseg_init(void); 27.8 -extern int xlseg_proc_init(void); 27.9 #endif /* CONFIG_ARCH_XENO */ 27.10 27.11 extern void set_device_ro(kdev_t dev,int flag);