direct-io.hg
changeset 539:3db5e8065e2d
bitkeeper revision 1.299 (3f0abdb0PTDH5mlRlFBRy97wZ-ii8w)
Whitespace fixes.
author | sos22@labyrinth.cl.cam.ac.uk |
---|---|
date | Tue Jul 08 12:48:48 2003 +0000 (2003-07-08) |
parents | 85abfe09ce32 |
children | 071d669cc50a |
files | BitKeeper/etc/ignore xen/drivers/block/xen_physdisk.c |
line diff
--- a/BitKeeper/etc/ignore	Tue Jul 08 12:46:22 2003 +0000
+++ b/BitKeeper/etc/ignore	Tue Jul 08 12:48:48 2003 +0000
@@ -431,3 +431,5 @@ tools/control/.checkstyle
 xenolinux-2.4.21-sparse/arch/xeno/drivers/block/xl_block.c~
 xenolinux-2.4.21-sparse/arch/xeno/drivers/block/xl_physdisk_proc.c.bak
 xenolinux-2.4.21-sparse/arch/xeno/drivers/block/xl_physdisk_proc.c~
+xen/drivers/block/xen_block.c~
+xen/drivers/block/xen_physdisk.c~
--- a/xen/drivers/block/xen_physdisk.c	Tue Jul 08 12:46:22 2003 +0000
+++ b/xen/drivers/block/xen_physdisk.c	Tue Jul 08 12:48:48 2003 +0000
@@ -22,43 +22,42 @@
 (device, start, end, mode) quads which say what it has access to,
 and we fake the logical view on top of that. */
 struct physdisk_ace {
- struct list_head list;
+ struct list_head list;

- unsigned short device;
- unsigned short partition;
- unsigned long start_sect;
- unsigned long n_sectors;
- int mode;
+ unsigned short device;
+ unsigned short partition;
+ unsigned long start_sect;
+ unsigned long n_sectors;
+ int mode;
 };

 /* Operation is a blkdev constant i.e. READ, WRITE, ... */
 /* Must be called with p->physdev_lock held. */
 static struct physdisk_ace *find_ace(const struct task_struct *p,
 unsigned short dev,
- unsigned long sect,
- int operation)
+ unsigned long sect, int operation)
 {
- struct list_head *cur_ace_head;
- struct physdisk_ace *cur_ace;
+ struct list_head *cur_ace_head;
+ struct physdisk_ace *cur_ace;

- list_for_each(cur_ace_head, &p->physdisk_aces) {
- cur_ace = list_entry(cur_ace_head, struct physdisk_ace,
- list);
- DPRINTK("Is [%lx, %lx) good for %lx?\n",
- cur_ace->start_sect, cur_ace->start_sect + cur_ace->n_sectors,
- sect);
- if (sect >= cur_ace->start_sect &&
- sect < cur_ace->start_sect + cur_ace->n_sectors &&
- dev == cur_ace->device &&
- ((operation == READ && (cur_ace->mode & PHYSDISK_MODE_R)) ||
- (operation == WRITE && (cur_ace->mode & PHYSDISK_MODE_W)))) {
- DPRINTK("Yes.\n");
- return cur_ace;
- } else {
- DPRINTK("No.\n");
+ list_for_each(cur_ace_head, &p->physdisk_aces) {
+ cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list);
+ DPRINTK("Is [%lx, %lx) good for %lx?\n",
+ cur_ace->start_sect,
+ cur_ace->start_sect + cur_ace->n_sectors, sect);
+ if (sect >= cur_ace->start_sect
+ && sect < cur_ace->start_sect + cur_ace->n_sectors
+ && dev == cur_ace->device
+ && ((operation == READ && (cur_ace->mode & PHYSDISK_MODE_R))
+ || (operation == WRITE
+ && (cur_ace->mode & PHYSDISK_MODE_W)))) {
+ DPRINTK("Yes.\n");
+ return cur_ace;
+ } else {
+ DPRINTK("No.\n");
+ }
 }
- }
- return NULL;
+ return NULL;
 }

 /* Hold the lock on entry, it remains held on exit. */
@@ -70,56 +69,53 @@ static void xen_physdisk_revoke_access(u
 unsigned long n_sectors,
 struct task_struct *p)
 {
- /* Find every ace which intersects [start_sect, start_sect +
- n_sectors] and either remove it completely or truncate it
- down. */
- struct list_head *cur_ace_head;
- struct physdisk_ace *cur_ace, *new_ace;
- unsigned long kill_zone_end, ace_end;
+ /* Find every ace which intersects [start_sect, start_sect +
+ n_sectors] and either remove it completely or truncate it
+ down. */
+ struct list_head *cur_ace_head;
+ struct physdisk_ace *cur_ace, *new_ace;
+ unsigned long kill_zone_end, ace_end;

- kill_zone_end = start_sect + n_sectors;
- list_for_each(cur_ace_head, &p->physdisk_aces) {
- cur_ace = list_entry(cur_ace_head, struct physdisk_ace,
- list);
- ace_end = cur_ace->start_sect + cur_ace->n_sectors;
- if (cur_ace->start_sect >= kill_zone_end ||
- ace_end <= start_sect ||
- cur_ace->device != dev)
- continue;
-
- DPRINTK("Killing ace [%lx, %lx) against kill zone [%lx, %lx)\n",
- cur_ace->start_sect, ace_end, start_sect, kill_zone_end);
+ kill_zone_end = start_sect + n_sectors;
+ list_for_each(cur_ace_head, &p->physdisk_aces) {
+ cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list);
+ ace_end = cur_ace->start_sect + cur_ace->n_sectors;
+ if (cur_ace->start_sect >= kill_zone_end ||
+ ace_end <= start_sect || cur_ace->device != dev)
+ continue;
+
+ DPRINTK("Killing ace [%lx, %lx) against kill zone [%lx, %lx)\n",
+ cur_ace->start_sect, ace_end, start_sect, kill_zone_end);

- if (cur_ace->start_sect >= start_sect &&
- ace_end <= kill_zone_end) {
- /* ace entirely within kill zone -> kill it */
- list_del(cur_ace_head);
- cur_ace_head = cur_ace_head->prev;
- kfree(cur_ace);
- } else if (ace_end <= kill_zone_end) {
- /* ace start before kill start, ace end in kill zone,
- move ace end. */
- cur_ace->n_sectors = start_sect - cur_ace->start_sect;
- } else if (cur_ace->start_sect >= start_sect) {
- /* ace start after kill start, ace end outside kill zone,
- move ace start. */
- cur_ace->start_sect = kill_zone_end;
- cur_ace->n_sectors = ace_end - cur_ace->start_sect;
- } else {
- /* The fun one: the ace entirely includes the kill zone. */
- /* Cut the current ace down to just the bit before the kzone,
- create a new ace for the bit just after it. */
- new_ace = kmalloc(sizeof(*cur_ace), GFP_KERNEL);
- new_ace->device = dev;
- new_ace->start_sect = kill_zone_end;
- new_ace->n_sectors = ace_end - kill_zone_end;
- new_ace->mode = cur_ace->mode;
+ if (cur_ace->start_sect >= start_sect && ace_end <= kill_zone_end) {
+ /* ace entirely within kill zone -> kill it */
+ list_del(cur_ace_head);
+ cur_ace_head = cur_ace_head->prev;
+ kfree(cur_ace);
+ } else if (ace_end <= kill_zone_end) {
+ /* ace start before kill start, ace end in kill zone,
+ move ace end. */
+ cur_ace->n_sectors = start_sect - cur_ace->start_sect;
+ } else if (cur_ace->start_sect >= start_sect) {
+ /* ace start after kill start, ace end outside kill zone,
+ move ace start. */
+ cur_ace->start_sect = kill_zone_end;
+ cur_ace->n_sectors = ace_end - cur_ace->start_sect;
+ } else {
+ /* The fun one: the ace entirely includes the kill zone. */
+ /* Cut the current ace down to just the bit before the kzone,
+ create a new ace for the bit just after it. */
+ new_ace = kmalloc(sizeof(*cur_ace), GFP_KERNEL);
+ new_ace->device = dev;
+ new_ace->start_sect = kill_zone_end;
+ new_ace->n_sectors = ace_end - kill_zone_end;
+ new_ace->mode = cur_ace->mode;

- cur_ace->n_sectors = start_sect - cur_ace->start_sect;
+ cur_ace->n_sectors = start_sect - cur_ace->start_sect;

- list_add(&new_ace->list, cur_ace_head);
+ list_add(&new_ace->list, cur_ace_head);
+ }
 }
- }
 }

 /* Hold the lock on entry, it remains held on exit. */
@@ -127,159 +123,155 @@ static int xen_physdisk_grant_access(uns
 unsigned short partition,
 unsigned long start_sect,
 unsigned long n_sectors,
- int mode,
- struct task_struct *p)
+ int mode, struct task_struct *p)
 {
- struct physdisk_ace *cur_ace;
+ struct physdisk_ace *cur_ace;
+
+ /* Make sure it won't overlap with any existing ACEs. */
+ /* XXX this isn't quite right if the domain already has read access
+ and we try to grant write access, or vice versa. */
+ xen_physdisk_revoke_access(dev, start_sect, n_sectors, p);

- /* Make sure it won't overlap with any existing ACEs. */
- /* XXX this isn't quite right if the domain already has read access
- and we try to grant write access, or vice versa. */
- xen_physdisk_revoke_access(dev, start_sect, n_sectors, p);
-
- if (mode) {
- cur_ace = kmalloc(sizeof(*cur_ace), GFP_KERNEL);
- cur_ace->device = dev;
- cur_ace->start_sect = start_sect;
- cur_ace->n_sectors = n_sectors;
- cur_ace->mode = mode;
- cur_ace->partition = partition;
+ if (mode) {
+ cur_ace = kmalloc(sizeof(*cur_ace), GFP_KERNEL);
+ cur_ace->device = dev;
+ cur_ace->start_sect = start_sect;
+ cur_ace->n_sectors = n_sectors;
+ cur_ace->mode = mode;
+ cur_ace->partition = partition;

- list_add_tail(&cur_ace->list, &p->physdisk_aces);
- }
+ list_add_tail(&cur_ace->list, &p->physdisk_aces);
+ }

- return 0;
+ return 0;
 }

-static void xen_physdisk_probe_access(physdisk_probebuf_t *buf,
+static void xen_physdisk_probe_access(physdisk_probebuf_t * buf,
 struct task_struct *p)
 {
- int max_aces;
- int n_aces;
- struct list_head *cur_ace_head;
- struct physdisk_ace *cur_ace;
- int x = 0;
+ int max_aces;
+ int n_aces;
+ struct list_head *cur_ace_head;
+ struct physdisk_ace *cur_ace;
+ int x = 0;

- max_aces = buf->n_aces;
- n_aces = 0;
- list_for_each(cur_ace_head, &p->physdisk_aces) {
- x++;
- if (x >= buf->start_ind) {
- cur_ace = list_entry(cur_ace_head, struct physdisk_ace,
- list);
- buf->entries[n_aces].device = cur_ace->device;
- buf->entries[n_aces].partition = cur_ace->partition;
- buf->entries[n_aces].start_sect = cur_ace->start_sect;
- buf->entries[n_aces].n_sectors = cur_ace->n_sectors;
- buf->entries[n_aces].mode = cur_ace->mode;
- n_aces++;
- if (n_aces >= max_aces)
- break;
+ max_aces = buf->n_aces;
+ n_aces = 0;
+ list_for_each(cur_ace_head, &p->physdisk_aces) {
+ x++;
+ if (x >= buf->start_ind) {
+ cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list);
+ buf->entries[n_aces].device = cur_ace->device;
+ buf->entries[n_aces].partition = cur_ace->partition;
+ buf->entries[n_aces].start_sect = cur_ace->start_sect;
+ buf->entries[n_aces].n_sectors = cur_ace->n_sectors;
+ buf->entries[n_aces].mode = cur_ace->mode;
+ n_aces++;
+ if (n_aces >= max_aces)
+ break;
+ }
 }
- }
- buf->n_aces = n_aces;
+ buf->n_aces = n_aces;
 }

-int xen_physdisk_grant(xp_disk_t *xpd_in)
+int xen_physdisk_grant(xp_disk_t * xpd_in)
 {
- struct task_struct *p;
- xp_disk_t *xpd = map_domain_mem(virt_to_phys(xpd_in));
- int res;
+ struct task_struct *p;
+ xp_disk_t *xpd = map_domain_mem(virt_to_phys(xpd_in));
+ int res;

- p = current;
- DPRINTK("Have current.\n");
- DPRINTK("Target domain %x\n", xpd->domain);
+ p = current;
+ DPRINTK("Have current.\n");
+ DPRINTK("Target domain %x\n", xpd->domain);

- p = find_domain_by_id(xpd->domain);
- if (p == NULL) {
- DPRINTK("Bad domain!\n");
- res = 1;
- goto out;
- }
- spin_lock(&p->physdev_lock);
- res = xen_physdisk_grant_access(xpd->device,
- xpd->partition,
- xpd->start_sect,
- xpd->n_sectors,
- xpd->mode,
- p);
- spin_unlock(&p->physdev_lock);
- put_task_struct(p);
+ p = find_domain_by_id(xpd->domain);
+ if (p == NULL) {
+ DPRINTK("Bad domain!\n");
+ res = 1;
+ goto out;
+ }
+ spin_lock(&p->physdev_lock);
+ res = xen_physdisk_grant_access(xpd->device,
+ xpd->partition,
+ xpd->start_sect,
+ xpd->n_sectors, xpd->mode, p);
+ spin_unlock(&p->physdev_lock);
+ put_task_struct(p);

- out:
- unmap_domain_mem(xpd);
- return res;
+ out:
+ unmap_domain_mem(xpd);
+ return res;
 }

 int xen_physdisk_probe(struct task_struct *requesting_domain,
- physdisk_probebuf_t *buf_in)
+ physdisk_probebuf_t * buf_in)
 {
- struct task_struct *p;
- physdisk_probebuf_t *buf = map_domain_mem(virt_to_phys(buf_in));
- int res;
+ struct task_struct *p;
+ physdisk_probebuf_t *buf = map_domain_mem(virt_to_phys(buf_in));
+ int res;

- if (requesting_domain->domain != 0 &&
- requesting_domain->domain != buf->domain) {
- res = 1;
- goto out;
- }
+ if (requesting_domain->domain != 0 &&
+ requesting_domain->domain != buf->domain) {
+ res = 1;
+ goto out;
+ }

- p = find_domain_by_id(buf->domain);
- if (p == NULL) {
- res = 1;
- goto out;
- }
+ p = find_domain_by_id(buf->domain);
+ if (p == NULL) {
+ res = 1;
+ goto out;
+ }

- spin_lock(&p->physdev_lock);
- xen_physdisk_probe_access(buf, p);
- spin_unlock(&p->physdev_lock);
- put_task_struct(p);
+ spin_lock(&p->physdev_lock);
+ xen_physdisk_probe_access(buf, p);
+ spin_unlock(&p->physdev_lock);
+ put_task_struct(p);

- res = 0;
- out:
- unmap_domain_mem(buf);
- return res;
+ res = 0;
+ out:
+ unmap_domain_mem(buf);
+ return res;
 }

-int xen_physdisk_access_okay(phys_seg_t *pseg, struct task_struct *p,
+int xen_physdisk_access_okay(phys_seg_t * pseg, struct task_struct *p,
 int operation)
 {
- struct physdisk_ace *cur_ace;
- unsigned long sect;
+ struct physdisk_ace *cur_ace;
+ unsigned long sect;

- DPRINTK("Checking access for domain %d, start sect 0x%lx, length 0x%x.\n",
- p->domain, pseg->sector_number, pseg->nr_sects);
+ DPRINTK
+ ("Checking access for domain %d, start sect 0x%lx, length 0x%x.\n",
+ p->domain, pseg->sector_number, pseg->nr_sects);

- for (sect = pseg->sector_number;
- sect < pseg->sector_number + pseg->nr_sects;
- ) {
- /* XXX this would be a lot faster if the aces were sorted on start
- address. Also in revoke_access. */
- spin_lock(&p->physdev_lock);
- cur_ace = find_ace(p, pseg->dev, sect, operation);
- spin_unlock(&p->physdev_lock);
- if (!cur_ace) {
- /* Default closed. */
- return 0;
+ for (sect = pseg->sector_number;
+ sect < pseg->sector_number + pseg->nr_sects;) {
+ /* XXX this would be a lot faster if the aces were sorted on start
+ address. Also in revoke_access. */
+ spin_lock(&p->physdev_lock);
+ cur_ace = find_ace(p, pseg->dev, sect, operation);
+ spin_unlock(&p->physdev_lock);
+ if (!cur_ace) {
+ /* Default closed. */
+ return 0;
+ }
+ sect +=
+ MAX(cur_ace->n_sectors,
+ pseg->nr_sects + pseg->sector_number - sect);
 }
- sect += MAX(cur_ace->n_sectors, pseg->nr_sects + pseg->sector_number - sect);
- }
- return 1;
+ return 1;
 }

 void destroy_physdisk_aces(struct task_struct *p)
 {
- struct list_head *cur_ace_head, *next_head;
- struct physdisk_ace *cur_ace;
+ struct list_head *cur_ace_head, *next_head;
+ struct physdisk_ace *cur_ace;

- spin_lock(&p->physdev_lock); /* We never release this again. */
+ spin_lock(&p->physdev_lock); /* We never release this again. */

- for (cur_ace_head = p->physdisk_aces.next;
- cur_ace_head != &p->physdisk_aces;
- cur_ace_head = next_head) {
- cur_ace = list_entry(cur_ace_head, struct physdisk_ace,
- list);
- next_head = cur_ace_head->next;
- kfree(cur_ace);
- }
+ for (cur_ace_head = p->physdisk_aces.next;
+ cur_ace_head != &p->physdisk_aces; cur_ace_head = next_head) {
+ cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list);
+ next_head = cur_ace_head->next;
+ kfree(cur_ace);
+ }
 }
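The substantive part of the reindented file is the interval bookkeeping in xen_physdisk_revoke_access(): every ACE that intersects the kill zone [start_sect, start_sect + n_sectors) is either deleted outright, truncated at its head or tail, or split into two pieces. The following is a minimal standalone sketch of those four overlap cases, not the Xen code itself; the array-backed struct range, the revoke() helper, and the fixed capacity are illustrative assumptions, whereas the real code keeps struct physdisk_ace entries on a list_head and does all of this under p->physdev_lock.

    /* Sketch only: revoke [kill_start, kill_start + kill_len) from a set of
     * access ranges, mirroring the four overlap cases in
     * xen_physdisk_revoke_access(). */
    #include <stdio.h>
    #include <string.h>

    struct range {
        unsigned long start;   /* first sector covered */
        unsigned long len;     /* number of sectors */
    };

    static void revoke(struct range *r, size_t *n, size_t cap,
                       unsigned long kill_start, unsigned long kill_len)
    {
        unsigned long kill_end = kill_start + kill_len;
        size_t i = 0;

        while (i < *n) {
            unsigned long end = r[i].start + r[i].len;

            if (r[i].start >= kill_end || end <= kill_start) {
                i++;                       /* no overlap: leave it alone */
            } else if (r[i].start >= kill_start && end <= kill_end) {
                /* Range entirely within the kill zone: delete it. */
                memmove(&r[i], &r[i + 1], (*n - i - 1) * sizeof(*r));
                (*n)--;                    /* do not advance i */
            } else if (end <= kill_end) {
                /* Overlaps at the tail: move the range's end back. */
                r[i].len = kill_start - r[i].start;
                i++;
            } else if (r[i].start >= kill_start) {
                /* Overlaps at the head: move the range's start forward. */
                r[i].len = end - kill_end;
                r[i].start = kill_end;
                i++;
            } else {
                /* Range strictly contains the kill zone: keep the piece
                 * before it and append a new piece for the part after it
                 * (the real code kmallocs a new ACE here). */
                if (*n < cap) {
                    r[*n].start = kill_end;
                    r[*n].len = end - kill_end;
                    (*n)++;
                }
                r[i].len = kill_start - r[i].start;
                i++;
            }
        }
    }

    int main(void)
    {
        struct range r[8] = { { 0, 100 } };   /* one grant covering sectors 0-99 */
        size_t n = 1;

        revoke(r, &n, 8, 40, 20);             /* revoke sectors 40-59 */
        for (size_t i = 0; i < n; i++)
            printf("[%lu, %lu)\n", r[i].start, r[i].start + r[i].len);
        return 0;
    }

Running the sketch revokes sectors 40-59 from the single grant covering sectors 0-99 and prints the two surviving pieces, [0, 40) and [60, 100), which is the same split the changeset's kill-zone code performs on the per-domain ACE list.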