ia64/xen-unstable

changeset 5365:ab753acf69d1

bitkeeper revision 1.1692 (42a6a969MoQ9te2DJVndEsbImXvW7g)

Merge arcadians.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xeno.bk
into arcadians.cl.cam.ac.uk:/auto/anfs/nos1/akw27/xeno-clone/xeno.bk
author akw27@arcadians.cl.cam.ac.uk
date Wed Jun 08 08:16:41 2005 +0000 (2005-06-08)
parents 731cd57862e5 f409e35891f3
children 5cc07785693b
files tools/blktap/block-async.c tools/blktap/block-async.h tools/blktap/parallax.c tools/blktap/requests-async.c
line diff
     1.1 --- a/tools/blktap/block-async.c	Tue Jun 07 15:30:17 2005 +0000
     1.2 +++ b/tools/blktap/block-async.c	Wed Jun 08 08:16:41 2005 +0000
     1.3 @@ -31,47 +31,47 @@
     1.4   */
     1.5  
     1.6  struct read_args {
     1.7 -	u64 addr;
     1.8 +    u64 addr;
     1.9  };
    1.10  
    1.11  struct write_args {
    1.12 -	u64   addr;
    1.13 -	char *block;
    1.14 +    u64   addr;
    1.15 +    char *block;
    1.16  };
    1.17  
    1.18  struct alloc_args {
    1.19 -	char *block;
    1.20 +    char *block;
    1.21  };
    1.22   
    1.23  struct pending_io_req {
    1.24 -	enum {IO_READ, IO_WRITE, IO_ALLOC, IO_RWAKE, IO_WWAKE} op;
    1.25 -	union {
    1.26 -		struct read_args  r;
    1.27 -		struct write_args w;
    1.28 -		struct alloc_args a;
    1.29 -	} u;
    1.30 -	io_cb_t cb;
    1.31 -	void *param;
    1.32 +    enum {IO_READ, IO_WRITE, IO_ALLOC, IO_RWAKE, IO_WWAKE} op;
    1.33 +    union {
    1.34 +        struct read_args  r;
    1.35 +        struct write_args w;
    1.36 +        struct alloc_args a;
    1.37 +    } u;
    1.38 +    io_cb_t cb;
    1.39 +    void *param;
    1.40  };
    1.41  
    1.42  void radix_lock_init(struct radix_lock *r)
    1.43  {
    1.44 -	int i;
    1.45 -	
    1.46 -	pthread_mutex_init(&r->lock, NULL);
    1.47 -	for (i=0; i < 1024; i++) {
    1.48 -		r->lines[i] = 0;
    1.49 -		r->waiters[i] = NULL;
    1.50 -		r->state[i] = ANY;
    1.51 -	}
    1.52 +    int i;
    1.53 +    
    1.54 +    pthread_mutex_init(&r->lock, NULL);
    1.55 +    for (i=0; i < 1024; i++) {
    1.56 +        r->lines[i] = 0;
    1.57 +        r->waiters[i] = NULL;
    1.58 +        r->state[i] = ANY;
    1.59 +    }
    1.60  }
    1.61  
    1.62  /* maximum outstanding I/O requests issued asynchronously */
    1.63  /* must be a power of 2.*/
    1.64 -#define MAX_PENDING_IO 1024 //1024
    1.65 +#define MAX_PENDING_IO 1024
    1.66  
    1.67  /* how many threads to concurrently issue I/O to the disk. */
    1.68 -#define IO_POOL_SIZE   10 //10
    1.69 +#define IO_POOL_SIZE   10
    1.70  
    1.71  static struct pending_io_req pending_io_reqs[MAX_PENDING_IO];
    1.72  static int pending_io_list[MAX_PENDING_IO];
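
The producer/consumer bookkeeping used throughout this file -- CAN_PRODUCE_PENDING_IO, PENDING_IO_ENT, PENDING_IO_MASK and PENDING_IO_IDX -- is defined in context the hunks here do not show. A plausible reconstruction, inferred purely from how the code below uses them (pending_io_list is a ring of free indices into pending_io_reqs, and io_prod/io_cons/io_free are ever-increasing cursors masked down to ring slots), would be:

    /* Hypothetical definitions, inferred from usage; not part of this diff. */
    #define PENDING_IO_MASK(_x)  ((_x) & (MAX_PENDING_IO - 1))
    #define PENDING_IO_IDX(_r)   ((_r) - pending_io_reqs)
    #define PENDING_IO_ENT(_x) \
        (&pending_io_reqs[pending_io_list[PENDING_IO_MASK(_x)]])
    #define CAN_PRODUCE_PENDING_IO  ((io_prod - io_free) < MAX_PENDING_IO)

Under this reading, block_read/block_write/block_alloc produce requests at io_prod, the I/O threads consume them at io_cons, and do_next_io_req recycles finished slots at io_free.
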
    1.73 @@ -87,276 +87,268 @@ static pthread_cond_t  pending_io_cond =
    1.74  
    1.75  static void init_pending_io(void)
    1.76  {
    1.77 -	int i;
    1.78 +    int i;
    1.79  	
    1.80 -	for (i=0; i<MAX_PENDING_IO; i++)
    1.81 -		pending_io_list[i] = i;
    1.82 +    for (i=0; i<MAX_PENDING_IO; i++)
    1.83 +        pending_io_list[i] = i;
    1.84  		
    1.85  } 
    1.86  
    1.87  void block_read(u64 addr, io_cb_t cb, void *param)
    1.88  {
    1.89 -	struct pending_io_req *req;
    1.90 -	
    1.91 -	pthread_mutex_lock(&pending_io_lock);
    1.92 -	assert(CAN_PRODUCE_PENDING_IO);
    1.93 -
    1.94 -	req = PENDING_IO_ENT(io_prod++);
    1.95 -	DPRINTF("Produce (R) %lu (%p)\n", io_prod - 1, req);
    1.96 -	req->op = IO_READ;
    1.97 -	req->u.r.addr = addr;
    1.98 -	req->cb = cb;
    1.99 -	req->param = param;
   1.100 -	
   1.101 +    struct pending_io_req *req;
   1.102 +    
   1.103 +    pthread_mutex_lock(&pending_io_lock);
   1.104 +    assert(CAN_PRODUCE_PENDING_IO);
   1.105 +    
   1.106 +    req = PENDING_IO_ENT(io_prod++);
   1.107 +    DPRINTF("Produce (R) %lu (%p)\n", io_prod - 1, req);
   1.108 +    req->op = IO_READ;
   1.109 +    req->u.r.addr = addr;
   1.110 +    req->cb = cb;
   1.111 +    req->param = param;
   1.112 +    
   1.113      pthread_cond_signal(&pending_io_cond);
   1.114 -	pthread_mutex_unlock(&pending_io_lock);	
   1.115 +    pthread_mutex_unlock(&pending_io_lock);	
   1.116  }
   1.117  
   1.118  
   1.119  void block_write(u64 addr, char *block, io_cb_t cb, void *param)
   1.120  {
   1.121 -	struct pending_io_req *req;
   1.122 -	
   1.123 -	pthread_mutex_lock(&pending_io_lock);
   1.124 -	assert(CAN_PRODUCE_PENDING_IO);
   1.125 -
   1.126 -	req = PENDING_IO_ENT(io_prod++);
   1.127 -	DPRINTF("Produce (W) %lu (%p)\n", io_prod - 1, req);
   1.128 -	req->op = IO_WRITE;
   1.129 -	req->u.w.addr  = addr;
   1.130 -	req->u.w.block = block;
   1.131 -	req->cb = cb;
   1.132 -	req->param = param;
   1.133 -	
   1.134 +    struct pending_io_req *req;
   1.135 +    
   1.136 +    pthread_mutex_lock(&pending_io_lock);
   1.137 +    assert(CAN_PRODUCE_PENDING_IO);
   1.138 +    
   1.139 +    req = PENDING_IO_ENT(io_prod++);
   1.140 +    DPRINTF("Produce (W) %lu (%p)\n", io_prod - 1, req);
   1.141 +    req->op = IO_WRITE;
   1.142 +    req->u.w.addr  = addr;
   1.143 +    req->u.w.block = block;
   1.144 +    req->cb = cb;
   1.145 +    req->param = param;
   1.146 +    
   1.147      pthread_cond_signal(&pending_io_cond);
   1.148 -	pthread_mutex_unlock(&pending_io_lock);	
   1.149 +    pthread_mutex_unlock(&pending_io_lock);	
   1.150  }
   1.151  
   1.152  
   1.153  void block_alloc(char *block, io_cb_t cb, void *param)
   1.154  {
   1.155 -	struct pending_io_req *req;
   1.156 +    struct pending_io_req *req;
   1.157  	
   1.158 -	pthread_mutex_lock(&pending_io_lock);
   1.159 -	assert(CAN_PRODUCE_PENDING_IO);
   1.160 -
   1.161 -	req = PENDING_IO_ENT(io_prod++);
   1.162 -	req->op = IO_ALLOC;
   1.163 -	req->u.a.block = block;
   1.164 -	req->cb = cb;
   1.165 -	req->param = param;
   1.166 -	
   1.167 +    pthread_mutex_lock(&pending_io_lock);
   1.168 +    assert(CAN_PRODUCE_PENDING_IO);
   1.169 +    
   1.170 +    req = PENDING_IO_ENT(io_prod++);
   1.171 +    req->op = IO_ALLOC;
   1.172 +    req->u.a.block = block;
   1.173 +    req->cb = cb;
   1.174 +    req->param = param;
   1.175 +    
   1.176      pthread_cond_signal(&pending_io_cond);
   1.177 -	pthread_mutex_unlock(&pending_io_lock);	
   1.178 +    pthread_mutex_unlock(&pending_io_lock);	
   1.179  }
   1.180  
   1.181  void block_rlock(struct radix_lock *r, int row, io_cb_t cb, void *param)
   1.182  {
   1.183 -	struct io_ret ret;
   1.184 -	pthread_mutex_lock(&r->lock);
   1.185 -	
   1.186 -	if (( r->lines[row] >= 0 ) && (r->state[row] != STOP)) {
   1.187 -		r->lines[row]++;
   1.188 -		r->state[row] = READ;
   1.189 -		DPRINTF("RLOCK  : %3d (row: %d)\n", r->lines[row], row);
   1.190 -		pthread_mutex_unlock(&r->lock);
   1.191 -		ret.type = IO_INT_T;
   1.192 -		ret.u.i = 0;
   1.193 -		cb(ret, param);
   1.194 -	} else {
   1.195 -		struct radix_wait **rwc;
   1.196 -		struct radix_wait *rw = 
   1.197 -			(struct radix_wait *) malloc (sizeof(struct radix_wait));
   1.198 -		DPRINTF("RLOCK  : %3d (row: %d) -- DEFERRED!\n", r->lines[row], row);
   1.199 -		rw->type  = RLOCK;
   1.200 -		rw->param = param;
   1.201 -		rw->cb    = cb;
   1.202 -		rw->next  = NULL;
   1.203 -		/* append to waiters list. */
   1.204 -		rwc = &r->waiters[row];
   1.205 -		while (*rwc != NULL) rwc = &(*rwc)->next;
   1.206 -		*rwc = rw;
   1.207 -		pthread_mutex_unlock(&r->lock);
   1.208 -		return;
   1.209 -	}
   1.210 +    struct io_ret ret;
   1.211 +    pthread_mutex_lock(&r->lock);
   1.212 +    
   1.213 +    if (( r->lines[row] >= 0 ) && (r->state[row] != STOP)) {
   1.214 +        r->lines[row]++;
   1.215 +        r->state[row] = READ;
   1.216 +        DPRINTF("RLOCK  : %3d (row: %d)\n", r->lines[row], row);
   1.217 +        pthread_mutex_unlock(&r->lock);
   1.218 +        ret.type = IO_INT_T;
   1.219 +        ret.u.i = 0;
   1.220 +        cb(ret, param);
   1.221 +    } else {
   1.222 +        struct radix_wait **rwc;
   1.223 +        struct radix_wait *rw = 
   1.224 +            (struct radix_wait *) malloc (sizeof(struct radix_wait));
   1.225 +        DPRINTF("RLOCK  : %3d (row: %d) -- DEFERRED!\n", r->lines[row], row);
   1.226 +        rw->type  = RLOCK;
   1.227 +        rw->param = param;
   1.228 +        rw->cb    = cb;
   1.229 +        rw->next  = NULL;
   1.230 +        /* append to waiters list. */
   1.231 +        rwc = &r->waiters[row];
   1.232 +        while (*rwc != NULL) rwc = &(*rwc)->next;
   1.233 +        *rwc = rw;
   1.234 +        pthread_mutex_unlock(&r->lock);
   1.235 +        return;
   1.236 +    }
   1.237  }
   1.238  
   1.239  
   1.240  void block_wlock(struct radix_lock *r, int row, io_cb_t cb, void *param)
   1.241  {
   1.242 -	struct io_ret ret;
   1.243 -	pthread_mutex_lock(&r->lock);
   1.244 -	
   1.245 -	/* the second check here is redundant -- just here for debugging now. */
   1.246 -	if ((r->state[row] == ANY) && ( r->lines[row] == 0 )) {
   1.247 -		r->state[row] = STOP;
   1.248 -		r->lines[row] = -1;
   1.249 -		DPRINTF("WLOCK  : %3d (row: %d)\n", r->lines[row], row);
   1.250 -		pthread_mutex_unlock(&r->lock);
   1.251 -		ret.type = IO_INT_T;
   1.252 -		ret.u.i = 0;
   1.253 -		cb(ret, param);
   1.254 -	} else {
   1.255 -		struct radix_wait **rwc;
   1.256 -		struct radix_wait *rw = 
   1.257 -			(struct radix_wait *) malloc (sizeof(struct radix_wait));
   1.258 -		DPRINTF("WLOCK  : %3d (row: %d) -- DEFERRED!\n", r->lines[row], row);
   1.259 -		rw->type  = WLOCK;
   1.260 -		rw->param = param;
   1.261 -		rw->cb    = cb;
   1.262 -		rw->next  = NULL;
   1.263 -		/* append to waiters list. */
   1.264 -		rwc = &r->waiters[row];
   1.265 -		while (*rwc != NULL) rwc = &(*rwc)->next;
   1.266 -		*rwc = rw;
   1.267 -		pthread_mutex_unlock(&r->lock);
   1.268 -		return;
   1.269 -	}
   1.270 +    struct io_ret ret;
   1.271 +    pthread_mutex_lock(&r->lock);
   1.272 +    
   1.273 +    /* the second check here is redundant -- just here for debugging now. */
   1.274 +    if ((r->state[row] == ANY) && ( r->lines[row] == 0 )) {
   1.275 +        r->state[row] = STOP;
   1.276 +        r->lines[row] = -1;
   1.277 +        DPRINTF("WLOCK  : %3d (row: %d)\n", r->lines[row], row);
   1.278 +        pthread_mutex_unlock(&r->lock);
   1.279 +        ret.type = IO_INT_T;
   1.280 +        ret.u.i = 0;
   1.281 +        cb(ret, param);
   1.282 +    } else {
   1.283 +        struct radix_wait **rwc;
   1.284 +        struct radix_wait *rw = 
   1.285 +            (struct radix_wait *) malloc (sizeof(struct radix_wait));
   1.286 +        DPRINTF("WLOCK  : %3d (row: %d) -- DEFERRED!\n", r->lines[row], row);
   1.287 +        rw->type  = WLOCK;
   1.288 +        rw->param = param;
   1.289 +        rw->cb    = cb;
   1.290 +        rw->next  = NULL;
   1.291 +        /* append to waiters list. */
   1.292 +        rwc = &r->waiters[row];
   1.293 +        while (*rwc != NULL) rwc = &(*rwc)->next;
   1.294 +        *rwc = rw;
   1.295 +        pthread_mutex_unlock(&r->lock);
   1.296 +        return;
   1.297 +    }
   1.298  	
   1.299  }
   1.300  
   1.301  /* called with radix_lock locked and lock count of zero. */
   1.302  static void wake_waiters(struct radix_lock *r, int row)
   1.303  {
   1.304 -	struct pending_io_req *req;
   1.305 -	struct radix_wait *rw;
   1.306 -	
   1.307 -	DPRINTF("prewake\n");
   1.308 -	if (r->lines[row] != 0) return;
   1.309 -	if (r->waiters[row] == NULL) {DPRINTF("nowaiters!\n");return;} 
   1.310 -	
   1.311 -	DPRINTF("wake\n");
   1.312 -	if (r->waiters[row]->type == WLOCK) {
   1.313 -		rw = r->waiters[row];
   1.314 -		pthread_mutex_lock(&pending_io_lock);
   1.315 -		assert(CAN_PRODUCE_PENDING_IO);
   1.316 -
   1.317 -		req = PENDING_IO_ENT(io_prod++);
   1.318 -		DPRINTF("Produce (WWAKE) %lu (%p)\n", io_prod - 1, req);
   1.319 -		req->op    = IO_WWAKE;
   1.320 -		req->cb    = rw->cb;
   1.321 -		req->param = rw->param;
   1.322 -		r->lines[row] = -1; /* write lock the row. */
   1.323 -		r->state[row] = STOP;
   1.324 -		r->waiters[row] = rw->next;
   1.325 -		free(rw);
   1.326 -		pthread_mutex_unlock(&pending_io_lock);
   1.327 -	} else /* RLOCK */ {
   1.328 -		while ((r->waiters[row] != NULL) && (r->waiters[row]->type == RLOCK)) {
   1.329 -			rw = r->waiters[row];
   1.330 -			pthread_mutex_lock(&pending_io_lock);
   1.331 -			assert(CAN_PRODUCE_PENDING_IO);
   1.332 -	
   1.333 -			req = PENDING_IO_ENT(io_prod++);
   1.334 -			DPRINTF("Produce (RWAKE) %lu (%p)\n", io_prod - 1, req);
   1.335 -			req->op    = IO_RWAKE;
   1.336 -			req->cb    = rw->cb;
   1.337 -			req->param = rw->param;
   1.338 -			r->lines[row]++; /* read lock the row. */
   1.339 -			r->state[row] = READ; 
   1.340 -			r->waiters[row] = rw->next;
   1.341 -			free(rw);
   1.342 -			pthread_mutex_unlock(&pending_io_lock);
   1.343 -		}
   1.344 -		if (r->waiters[row] != NULL) /* There is a write queued still */
   1.345 -			r->state[row] = STOP;
   1.346 -	}	
   1.347 -	
   1.348 -	DPRINTF("wakedone\n");
   1.349 -	DPRINTF("prod: %lu cons: %lu free: %lu\n", io_prod, io_cons, io_free);
   1.350 -	pthread_mutex_lock(&pending_io_lock);
   1.351 +    struct pending_io_req *req;
   1.352 +    struct radix_wait *rw;
   1.353 +    
   1.354 +    if (r->lines[row] != 0) return;
   1.355 +    if (r->waiters[row] == NULL) return; 
   1.356 +    
   1.357 +    if (r->waiters[row]->type == WLOCK) {
   1.358 +
   1.359 +        rw = r->waiters[row];
   1.360 +        pthread_mutex_lock(&pending_io_lock);
   1.361 +        assert(CAN_PRODUCE_PENDING_IO);
   1.362 +        
   1.363 +        req = PENDING_IO_ENT(io_prod++);
   1.364 +        req->op    = IO_WWAKE;
   1.365 +        req->cb    = rw->cb;
   1.366 +        req->param = rw->param;
   1.367 +        r->lines[row] = -1; /* write lock the row. */
   1.368 +        r->state[row] = STOP;
   1.369 +        r->waiters[row] = rw->next;
   1.370 +        free(rw);
   1.371 +        pthread_mutex_unlock(&pending_io_lock);
   1.372 +    
   1.373 +    } else /* RLOCK */ {
   1.374 +
   1.375 +        while ((r->waiters[row] != NULL) && (r->waiters[row]->type == RLOCK)) {
   1.376 +            rw = r->waiters[row];
   1.377 +            pthread_mutex_lock(&pending_io_lock);
   1.378 +            assert(CAN_PRODUCE_PENDING_IO);
   1.379 +            
   1.380 +            req = PENDING_IO_ENT(io_prod++);
   1.381 +            req->op    = IO_RWAKE;
   1.382 +            req->cb    = rw->cb;
   1.383 +            req->param = rw->param;
   1.384 +            r->lines[row]++; /* read lock the row. */
   1.385 +            r->state[row] = READ; 
   1.386 +            r->waiters[row] = rw->next;
   1.387 +            free(rw);
   1.388 +            pthread_mutex_unlock(&pending_io_lock);
   1.389 +        }
   1.390 +
   1.391 +        if (r->waiters[row] != NULL) /* There is a write queued still */
   1.392 +            r->state[row] = STOP;
   1.393 +    }	
   1.394 +    
   1.395 +    pthread_mutex_lock(&pending_io_lock);
   1.396      pthread_cond_signal(&pending_io_cond);
   1.397 -	pthread_mutex_unlock(&pending_io_lock);
   1.398 +    pthread_mutex_unlock(&pending_io_lock);
   1.399  }
   1.400  
   1.401  void block_runlock(struct radix_lock *r, int row, io_cb_t cb, void *param)
   1.402  {
   1.403 -	struct io_ret ret;
   1.404 +    struct io_ret ret;
   1.405  	
   1.406 -	pthread_mutex_lock(&r->lock);
   1.407 -	assert(r->lines[row] > 0); /* try to catch misuse. */
   1.408 -	r->lines[row]--;
   1.409 -	DPRINTF("RUNLOCK: %3d (row: %d)\n", r->lines[row], row);
   1.410 -	if (r->lines[row] == 0) {
   1.411 -		r->state[row] = ANY;
   1.412 -		wake_waiters(r, row);
   1.413 -	}
   1.414 -	pthread_mutex_unlock(&r->lock);
   1.415 -	cb(ret, param);
   1.416 +    pthread_mutex_lock(&r->lock);
   1.417 +    assert(r->lines[row] > 0); /* try to catch misuse. */
   1.418 +    r->lines[row]--;
   1.419 +    if (r->lines[row] == 0) {
   1.420 +        r->state[row] = ANY;
   1.421 +        wake_waiters(r, row);
   1.422 +    }
   1.423 +    pthread_mutex_unlock(&r->lock);
   1.424 +    cb(ret, param);
   1.425  }
   1.426  
   1.427  void block_wunlock(struct radix_lock *r, int row, io_cb_t cb, void *param)
   1.428  {
   1.429 -	struct io_ret ret;
   1.430 -	
   1.431 -	pthread_mutex_lock(&r->lock);
   1.432 -	assert(r->lines[row] == -1); /* try to catch misuse. */
   1.433 -	r->lines[row] = 0;
   1.434 -	r->state[row] = ANY;
   1.435 -	DPRINTF("WUNLOCK: %3d (row: %d)\n", r->lines[row], row);
   1.436 -	wake_waiters(r, row);
   1.437 -	pthread_mutex_unlock(&r->lock);
   1.438 -	cb(ret, param);
   1.439 +    struct io_ret ret;
   1.440 +    
   1.441 +    pthread_mutex_lock(&r->lock);
   1.442 +    assert(r->lines[row] == -1); /* try to catch misuse. */
   1.443 +    r->lines[row] = 0;
   1.444 +    r->state[row] = ANY;
   1.445 +    wake_waiters(r, row);
   1.446 +    pthread_mutex_unlock(&r->lock);
   1.447 +    cb(ret, param);
   1.448  }
   1.449  
   1.450  /* consumer calls */
   1.451  static void do_next_io_req(struct pending_io_req *req)
   1.452  {
   1.453 -	struct io_ret          ret;
   1.454 -	void  *param;
   1.455 +    struct io_ret          ret;
   1.456 +    void  *param;
   1.457 +    
   1.458 +    switch (req->op) {
   1.459 +    case IO_READ:
   1.460 +        ret.type = IO_BLOCK_T;
   1.461 +        ret.u.b  = readblock(req->u.r.addr);
   1.462 +        break;
   1.463 +    case IO_WRITE:
   1.464 +        ret.type = IO_INT_T;
   1.465 +        ret.u.i  = writeblock(req->u.w.addr, req->u.w.block);
   1.466 +        DPRINTF("wrote %d at %Lu\n", *(int *)(req->u.w.block), req->u.w.addr);
   1.467 +        break;
   1.468 +    case IO_ALLOC:
   1.469 +        ret.type = IO_ADDR_T;
   1.470 +        ret.u.a  = allocblock(req->u.a.block);
   1.471 +        break;
   1.472 +    case IO_RWAKE:
   1.473 +        DPRINTF("WAKE DEFERRED RLOCK!\n");
   1.474 +        ret.type = IO_INT_T;
   1.475 +        ret.u.i  = 0;
   1.476 +        break;
   1.477 +    case IO_WWAKE:
   1.478 +        DPRINTF("WAKE DEFERRED WLOCK!\n");
   1.479 +        ret.type = IO_INT_T;
   1.480 +        ret.u.i  = 0;
   1.481 +        break;
   1.482 +    default:
   1.483 +        DPRINTF("Unknown IO operation on pending list!\n");
   1.484 +        return;
   1.485 +    }
   1.486 +    
   1.487 +    param = req->param;
   1.488 +    pthread_mutex_lock(&pending_io_lock);
   1.489 +    pending_io_list[PENDING_IO_MASK(io_free++)] = PENDING_IO_IDX(req);
   1.490 +    pthread_mutex_unlock(&pending_io_lock);
   1.491  	
   1.492 -	switch (req->op) {
   1.493 -	case IO_READ:
   1.494 -		ret.type = IO_BLOCK_T;
   1.495 -		ret.u.b  = readblock(req->u.r.addr);
   1.496 -		break;
   1.497 -	case IO_WRITE:
   1.498 -		ret.type = IO_INT_T;
   1.499 -		ret.u.i  = writeblock(req->u.w.addr, req->u.w.block);
   1.500 -		DPRINTF("wrote %d at %Lu\n", *(int *)(req->u.w.block), req->u.w.addr);
   1.501 -		break;
   1.502 -	case IO_ALLOC:
   1.503 -		ret.type = IO_ADDR_T;
   1.504 -		ret.u.a  = allocblock(req->u.a.block);
   1.505 -		break;
   1.506 -	case IO_RWAKE:
   1.507 -		DPRINTF("WAKE DEFERRED RLOCK!\n");
   1.508 -		ret.type = IO_INT_T;
   1.509 -		ret.u.i  = 0;
   1.510 -		break;
   1.511 -	case IO_WWAKE:
   1.512 -		DPRINTF("WAKE DEFERRED WLOCK!\n");
   1.513 -		ret.type = IO_INT_T;
   1.514 -		ret.u.i  = 0;
   1.515 -		break;
   1.516 -	default:
   1.517 -		DPRINTF("Unknown IO operation on pending list!\n");
   1.518 -		return;
   1.519 -	}
   1.520 -	
   1.521 -	param = req->param;
   1.522 -	DPRINTF("freeing idx %d to slot %lu.\n", PENDING_IO_IDX(req), PENDING_IO_MASK(io_free));
   1.523 -	pthread_mutex_lock(&pending_io_lock);
   1.524 -	pending_io_list[PENDING_IO_MASK(io_free++)] = PENDING_IO_IDX(req);
   1.525 -	DPRINTF("       : prod: %lu cons: %lu free: %lu\n", io_prod, io_cons, io_free);
   1.526 -	pthread_mutex_unlock(&pending_io_lock);
   1.527 -	
   1.528 -	assert(req->cb != NULL);
   1.529 -	req->cb(ret, param);
   1.530 -		
   1.531 +    assert(req->cb != NULL);
   1.532 +    req->cb(ret, param);
   1.533 +    
   1.534  }
   1.535  
   1.536  void *io_thread(void *param) 
   1.537  {
   1.538 -	int tid;
   1.539 -	struct pending_io_req *req;
   1.540 -	
   1.541 -	/* Set this thread's tid. */
   1.542 +    int tid;
   1.543 +    struct pending_io_req *req;
   1.544 +    
   1.545 +    /* Set this thread's tid. */
   1.546      tid = *(int *)param;
   1.547      free(param);
   1.548      
   1.549 -    DPRINTF("IOT %2d started.\n", tid);
   1.550 -    
   1.551  start:
   1.552      pthread_mutex_lock(&pending_io_lock);
   1.553      while (io_prod == io_cons) {
   1.554 @@ -369,15 +361,12 @@ start:
   1.555          goto start;
   1.556      }
   1.557      
   1.558 -	req = PENDING_IO_ENT(io_cons++);
   1.559 -	DPRINTF("IOT %2d has req %04d(%p).\n", tid, PENDING_IO_IDX(req), req);
   1.560 -	DPRINTF("       : prod: %lu cons: %lu free: %lu\n", io_prod, io_cons, io_free);
   1.561 -	pthread_mutex_unlock(&pending_io_lock);
   1.562 -	
   1.563 +    req = PENDING_IO_ENT(io_cons++);
   1.564 +    pthread_mutex_unlock(&pending_io_lock);
   1.565  	
   1.566      do_next_io_req(req);
   1.567      
   1.568 -	goto start;
   1.569 +    goto start;
   1.570  	
   1.571  }
   1.572  
   1.573 @@ -385,9 +374,9 @@ static pthread_t io_pool[IO_POOL_SIZE];
   1.574  void start_io_threads(void)
   1.575  
   1.576  {	
   1.577 -	int i, tid=0;
   1.578 -	
   1.579 -	 for (i=0; i < IO_POOL_SIZE; i++) {
   1.580 +    int i, tid=0;
   1.581 +    
   1.582 +    for (i=0; i < IO_POOL_SIZE; i++) {
   1.583          int ret, *t;
   1.584          t = (int *)malloc(sizeof(int));
   1.585          *t = tid++;
   1.586 @@ -399,6 +388,6 @@ void start_io_threads(void)
   1.587  
   1.588  void init_block_async(void)
   1.589  {
   1.590 -	init_pending_io();
   1.591 -	start_io_threads();
   1.592 +    init_pending_io();
   1.593 +    start_io_threads();
   1.594  }
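
Taken together, block_rlock/block_runlock and block_wlock/block_wunlock form a per-row reader/writer lock whose grant arrives via callback instead of blocking the caller; deferred grants are pushed through the I/O thread pool as IO_RWAKE/IO_WWAKE requests, so waiters always wake on a worker thread. Note also that block_runlock and block_wunlock pass their local struct io_ret to the callback uninitialized, so unlock callbacks must not inspect it (the callers in requests-async.c don't). A minimal illustrative caller, with hypothetical function names, might look like:

    /* Sketch: take a read lock on row 7, work under it, then release. */
    static void unlock_done(struct io_ret r, void *param)
    {
        /* Unlock completion; r carries no meaningful value here. */
    }

    static void rlock_granted(struct io_ret r, void *param)
    {
        struct radix_lock *lock = (struct radix_lock *)param;
        /* ... read state protected by row 7 ... */
        block_runlock(lock, 7, unlock_done, lock);
    }

    void example(struct radix_lock *lock)
    {
        block_rlock(lock, 7, rlock_granted, lock);
    }
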
     2.1 --- a/tools/blktap/block-async.h	Tue Jun 07 15:30:17 2005 +0000
     2.2 +++ b/tools/blktap/block-async.h	Wed Jun 08 08:16:41 2005 +0000
     2.3 @@ -12,29 +12,29 @@
     2.4  
     2.5  struct io_ret
     2.6  {
     2.7 -	enum {IO_ADDR_T, IO_BLOCK_T, IO_INT_T} type;
     2.8 -	union {
     2.9 -		u64   a;
    2.10 -		char *b;
    2.11 -		int   i;
    2.12 -	} u;
    2.13 +    enum {IO_ADDR_T, IO_BLOCK_T, IO_INT_T} type;
    2.14 +    union {
    2.15 +        u64   a;
    2.16 +        char *b;
    2.17 +        int   i;
    2.18 +    } u;
    2.19  };
    2.20  
    2.21  typedef void (*io_cb_t)(struct io_ret r, void *param);
    2.22  
    2.23  /* per-vdi lock structures to make sure requests run in a safe order. */
    2.24  struct radix_wait {
    2.25 -	enum {RLOCK, WLOCK} type;
    2.26 -	io_cb_t  cb;
    2.27 -	void    *param;
    2.28 -	struct radix_wait *next;
    2.29 +    enum {RLOCK, WLOCK} type;
    2.30 +    io_cb_t  cb;
    2.31 +    void    *param;
    2.32 +    struct radix_wait *next;
    2.33  };
    2.34  
    2.35  struct radix_lock {
    2.36 -	pthread_mutex_t lock;
    2.37 -	int                    lines[1024];
    2.38 -	struct radix_wait     *waiters[1024];
    2.39 -	enum {ANY, READ, STOP} state[1024];
    2.40 +    pthread_mutex_t lock;
    2.41 +    int                    lines[1024];
    2.42 +    struct radix_wait     *waiters[1024];
    2.43 +    enum {ANY, READ, STOP} state[1024];
    2.44  };
    2.45  void radix_lock_init(struct radix_lock *r);
    2.46  
    2.47 @@ -49,20 +49,20 @@ void init_block_async(void);
    2.48  
    2.49  static inline u64 IO_ADDR(struct io_ret r)
    2.50  {
    2.51 -	assert(r.type == IO_ADDR_T);
    2.52 -	return r.u.a;
    2.53 +    assert(r.type == IO_ADDR_T);
    2.54 +    return r.u.a;
    2.55  }
    2.56  
    2.57  static inline char *IO_BLOCK(struct io_ret r)
    2.58  {
    2.59 -	assert(r.type == IO_BLOCK_T);
    2.60 -	return r.u.b;
    2.61 +    assert(r.type == IO_BLOCK_T);
    2.62 +    return r.u.b;
    2.63  }
    2.64  
    2.65  static inline int IO_INT(struct io_ret r)
    2.66  {
    2.67 -	assert(r.type == IO_INT_T);
    2.68 -	return r.u.i;
    2.69 +    assert(r.type == IO_INT_T);
    2.70 +    return r.u.i;
    2.71  }
    2.72  
    2.73  
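
The io_ret accessors above implement a small tagged union: each getter asserts the tag before extracting its payload, so a callback wired to the wrong operation fails loudly instead of silently misreading the union. A sketch of a block_read completion callback, assuming (as read_cb in requests-async.c does) that returned blocks are heap-allocated and owned by the callback:

    /* Sketch: consuming a block_read result through the typed accessor. */
    static void on_read(struct io_ret r, void *param)
    {
        char *data = IO_BLOCK(r);   /* asserts r.type == IO_BLOCK_T */
        if (data == NULL)
            return;                 /* read failed */
        /* ... use the block ... */
        free(data);
    }
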
     3.1 --- a/tools/blktap/parallax.c	Tue Jun 07 15:30:17 2005 +0000
     3.2 +++ b/tools/blktap/parallax.c	Wed Jun 08 08:16:41 2005 +0000
     3.3 @@ -328,33 +328,33 @@ typedef struct {
     3.4  pending_t pending_list[MAX_REQUESTS];
     3.5  
     3.6  struct cb_param {
     3.7 -	pending_t *pent;
     3.8 -	int       segment;
     3.9 -	u64       sector; 
    3.10 -	u64       vblock; /* for debug printing -- can be removed. */
    3.11 +    pending_t *pent;
    3.12 +    int       segment;
    3.13 +    u64       sector; 
    3.14 +    u64       vblock; /* for debug printing -- can be removed. */
    3.15  };
    3.16  
    3.17  static void read_cb(struct io_ret r, void *in_param)
    3.18  {
    3.19 -	struct cb_param *param = (struct cb_param *)in_param;
    3.20 -	pending_t *p = param->pent;
    3.21 -	int segment = param->segment;
    3.22 -	blkif_request_t *req = p->req;
    3.23 +    struct cb_param *param = (struct cb_param *)in_param;
    3.24 +    pending_t *p = param->pent;
    3.25 +    int segment = param->segment;
    3.26 +    blkif_request_t *req = p->req;
    3.27      unsigned long size, offset, start;
    3.28 -	char *dpage, *spage;
    3.29 +    char *dpage, *spage;
    3.30  	
    3.31 -	spage  = IO_BLOCK(r);
    3.32 -	if (spage == NULL) { p->error++; goto finish; }
    3.33 -	dpage  = (char *)MMAP_VADDR(ID_TO_IDX(req->id), segment);
    3.34 +    spage  = IO_BLOCK(r);
    3.35 +    if (spage == NULL) { p->error++; goto finish; }
    3.36 +    dpage  = (char *)MMAP_VADDR(ID_TO_IDX(req->id), segment);
    3.37      
    3.38      /* Calculate read size and offset within the read block. */
    3.39  
    3.40      offset = (param->sector << SECTOR_SHIFT) % BLOCK_SIZE;
    3.41      size = ( blkif_last_sect (req->frame_and_sects[segment]) -
    3.42               blkif_first_sect(req->frame_and_sects[segment]) + 1
    3.43 -           ) << SECTOR_SHIFT;
    3.44 +        ) << SECTOR_SHIFT;
    3.45      start = blkif_first_sect(req->frame_and_sects[segment]) 
    3.46 -            << SECTOR_SHIFT;
    3.47 +        << SECTOR_SHIFT;
    3.48  
    3.49      DPRINTF("ParallaxRead: sect: %lld (%ld,%ld),  "
    3.50              "vblock %llx, "
    3.51 @@ -371,23 +371,23 @@ static void read_cb(struct io_ret r, voi
    3.52      pthread_mutex_lock(&p->mutex);
    3.53      p->count--;
    3.54      
    3.55 -	if (p->count == 0) {
    3.56 +    if (p->count == 0) {
    3.57      	blkif_response_t *rsp;
    3.58      	
    3.59          rsp = (blkif_response_t *)req;
    3.60          rsp->id = req->id;
    3.61          rsp->operation = BLKIF_OP_READ;
    3.62      	if (p->error == 0) {
    3.63 -	        rsp->status = BLKIF_RSP_OKAY;
    3.64 +            rsp->status = BLKIF_RSP_OKAY;
    3.65      	} else {
    3.66 -    		rsp->status = BLKIF_RSP_ERROR;
    3.67 +            rsp->status = BLKIF_RSP_ERROR;
    3.68      	}
    3.69          blktap_inject_response(rsp);       
    3.70      }
    3.71      
    3.72      pthread_mutex_unlock(&p->mutex);
    3.73  	
    3.74 -	free(param); /* TODO: replace with cached alloc/dealloc */
    3.75 +    free(param); /* TODO: replace with cached alloc/dealloc */
    3.76  }	
    3.77  
    3.78  int parallax_read(blkif_request_t *req, blkif_t *blkif)
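
The offset/size/start arithmetic in read_cb above maps 512-byte-sector requests onto larger store blocks; the shifts suggest SECTOR_SHIFT is 9 and BLOCK_SIZE is 4096, though neither constant appears in these hunks. A worked example under that assumption, for a segment spanning sectors 3..5 of its page with param->sector == 11 (the memcpy itself sits in elided context):

    offset = (11 << 9) % 4096;  /* 5632 % 4096 = 1536 bytes into the store block */
    size   = (5 - 3 + 1) << 9;  /* three sectors = 1536 bytes                    */
    start  = 3 << 9;            /* 1536 bytes into the destination page          */
    memcpy(dpage + start, spage + offset, size);
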
    3.79 @@ -414,21 +414,20 @@ int parallax_read(blkif_request_t *req, 
    3.80          pthread_t tid;
    3.81          int ret;
    3.82          struct cb_param *p;
    3.83 -
    3.84 -	    /* Round the requested segment to a block address. */
    3.85 -	    sector  = req->sector_number + (8*i);
    3.86 -	    vblock = (sector << SECTOR_SHIFT) >> BLOCK_SHIFT;
    3.87 -
    3.88 -		/* TODO: Replace this call to malloc with a cached allocation */
    3.89 -		p = (struct cb_param *)malloc(sizeof(struct cb_param));
    3.90 -		p->pent = pent;
    3.91 -		p->sector = sector; 
    3.92 -		p->segment = i;     
    3.93 -		p->vblock = vblock; /* dbg */
    3.94 -		
    3.95 -	    /* Get that block from the store. */
    3.96 -	    async_read(vdi, vblock, read_cb, (void *)p);
    3.97 -
    3.98 +        
    3.99 +        /* Round the requested segment to a block address. */
   3.100 +        sector  = req->sector_number + (8*i);
   3.101 +        vblock = (sector << SECTOR_SHIFT) >> BLOCK_SHIFT;
   3.102 +        
   3.103 +        /* TODO: Replace this call to malloc with a cached allocation */
   3.104 +        p = (struct cb_param *)malloc(sizeof(struct cb_param));
   3.105 +        p->pent = pent;
   3.106 +        p->sector = sector; 
   3.107 +        p->segment = i;     
   3.108 +        p->vblock = vblock; /* dbg */
   3.109 +        
   3.110 +        /* Get that block from the store. */
   3.111 +        async_read(vdi, vblock, read_cb, (void *)p);    
   3.112      }
   3.113      
   3.114      return BLKTAP_STOLEN;
   3.115 @@ -444,33 +443,33 @@ err:
   3.116  
   3.117  static void write_cb(struct io_ret r, void *in_param)
   3.118  {
   3.119 -	struct cb_param *param = (struct cb_param *)in_param;
   3.120 -	pending_t *p = param->pent;
   3.121 -	blkif_request_t *req = p->req;
   3.122 -
   3.123 -	/* catch errors from the block code. */
   3.124 -	if (IO_INT(r) < 0) p->error++;
   3.125 -	
   3.126 +    struct cb_param *param = (struct cb_param *)in_param;
   3.127 +    pending_t *p = param->pent;
   3.128 +    blkif_request_t *req = p->req;
   3.129 +    
   3.130 +    /* catch errors from the block code. */
   3.131 +    if (IO_INT(r) < 0) p->error++;
   3.132 +    
   3.133      pthread_mutex_lock(&p->mutex);
   3.134      p->count--;
   3.135      
   3.136 -	if (p->count == 0) {
   3.137 +    if (p->count == 0) {
   3.138      	blkif_response_t *rsp;
   3.139      	
   3.140          rsp = (blkif_response_t *)req;
   3.141          rsp->id = req->id;
   3.142          rsp->operation = BLKIF_OP_WRITE;
   3.143      	if (p->error == 0) {
   3.144 -	        rsp->status = BLKIF_RSP_OKAY;
   3.145 +            rsp->status = BLKIF_RSP_OKAY;
   3.146      	} else {
   3.147 -    		rsp->status = BLKIF_RSP_ERROR;
   3.148 +            rsp->status = BLKIF_RSP_ERROR;
   3.149      	}
   3.150          blktap_inject_response(rsp);       
   3.151      }
   3.152      
   3.153      pthread_mutex_unlock(&p->mutex);
   3.154  	
   3.155 -	free(param); /* TODO: replace with cached alloc/dealloc */
   3.156 +    free(param); /* TODO: replace with cached alloc/dealloc */
   3.157  }
   3.158  
   3.159  int parallax_write(blkif_request_t *req, blkif_t *blkif)
   3.160 @@ -496,7 +495,7 @@ int parallax_write(blkif_request_t *req,
   3.161      
   3.162      for (i = 0; i < req->nr_segments; i++) {
   3.163          struct cb_param *p;
   3.164 -            
   3.165 +        
   3.166          spage  = (char *)MMAP_VADDR(ID_TO_IDX(req->id), i);
   3.167          
   3.168          /* Round the requested segment to a block address. */
   3.169 @@ -509,7 +508,7 @@ int parallax_write(blkif_request_t *req,
   3.170          offset = (sector << SECTOR_SHIFT) % BLOCK_SIZE;
   3.171          size = ( blkif_last_sect (req->frame_and_sects[i]) -
   3.172                   blkif_first_sect(req->frame_and_sects[i]) + 1
   3.173 -               ) << SECTOR_SHIFT;
   3.174 +            ) << SECTOR_SHIFT;
   3.175          start = blkif_first_sect(req->frame_and_sects[i]) << SECTOR_SHIFT;
   3.176  
   3.177          DPRINTF("ParallaxWrite: sect: %lld (%ld,%ld),  "
   3.178 @@ -527,15 +526,15 @@ int parallax_write(blkif_request_t *req,
   3.179              goto err;
   3.180          }
   3.181          
   3.182 - 		/* TODO: Replace this call to malloc with a cached allocation */
   3.183 -		p = (struct cb_param *)malloc(sizeof(struct cb_param));
   3.184 -		p->pent = pent;
   3.185 -		p->sector = sector; 
   3.186 -		p->segment = i;     
   3.187 -		p->vblock = vblock; /* dbg */
   3.188 -		
   3.189 +        /* TODO: Replace this call to malloc with a cached allocation */
   3.190 +        p = (struct cb_param *)malloc(sizeof(struct cb_param));
   3.191 +        p->pent = pent;
   3.192 +        p->sector = sector; 
   3.193 +        p->segment = i;     
   3.194 +        p->vblock = vblock; /* dbg */
   3.195 +        
   3.196          /* Issue the write to the store. */
   3.197 -	    async_write(vdi, vblock, spage, write_cb, (void *)p);
   3.198 +        async_write(vdi, vblock, spage, write_cb, (void *)p);
   3.199      }
   3.200  
   3.201      return BLKTAP_STOLEN;
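
read_cb and write_cb share one completion pattern: each per-segment callback decrements a shared count under p->mutex, and whichever callback reaches zero builds the single blkif response and injects it, reporting BLKIF_RSP_ERROR if any segment tallied an error. The pending_t declaration itself sits just above the first parallax.c hunk; a reconstruction consistent with its use here, with field layout inferred rather than taken from the diff:

    typedef struct {
        blkif_request_t *req;    /* original frontend request             */
        int              count;  /* outstanding per-segment callbacks     */
        int              error;  /* failed-segment tally                  */
        pthread_mutex_t  mutex;  /* guards count and error across threads */
    } pending_t;
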
     4.1 --- a/tools/blktap/requests-async.c	Tue Jun 07 15:30:17 2005 +0000
     4.2 +++ b/tools/blktap/requests-async.c	Wed Jun 08 08:16:41 2005 +0000
     4.3 @@ -1,6 +1,6 @@
     4.4 -/* read.c
     4.5 +/* requests-async.c
     4.6   *
     4.7 - * asynchronous read experiment for parallax.
     4.8 + * asynchronous request dispatcher for radix access in parallax.
     4.9   */
    4.10  
    4.11  #include <stdio.h>
    4.12 @@ -17,9 +17,6 @@
    4.13  #define L3_IDX(_a) (((_a) & 0x00000000000001ffULL))
    4.14  
    4.15  
    4.16 -
    4.17 -//#define STANDALONE
    4.18 -
    4.19  #if 0
    4.20  #define DPRINTF(_f, _a...) printf ( _f , ## _a )
    4.21  #else
    4.22 @@ -45,10 +42,10 @@ struct io_req {
    4.23  
    4.24  void clear_w_bits(radix_tree_node node) 
    4.25  {
    4.26 -	int i;
    4.27 -	for (i=0; i<RADIX_TREE_MAP_ENTRIES; i++)
    4.28 -		node[i] = node[i] & ONEMASK;
    4.29 -	return;
    4.30 +    int i;
    4.31 +    for (i=0; i<RADIX_TREE_MAP_ENTRIES; i++)
    4.32 +        node[i] = node[i] & ONEMASK;
    4.33 +    return;
    4.34  }
    4.35  
    4.36  enum states {
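
clear_w_bits masks every entry of a radix node with ONEMASK, stripping a per-entry flag bit while keeping the block address. The helpers this scheme leans on -- getid(), iswritable() and writable(), used by write_cb below -- are defined with the radix code rather than in this file. A plausible shape, offered as an assumption consistent with that usage:

    /* Hypothetical reconstruction; the real definitions live in the radix code. */
    #define ONE      0x8000000000000000ULL   /* high bit: entry is writable */
    #define ONEMASK  (~ONE)                  /* low 63 bits: block address  */
    #define getid(_x)       ((_x) & ONEMASK)
    #define iswritable(_x)  (((_x) & ONE) != 0)
    #define writable(_x)    ((_x) | ONE)
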
    4.37 @@ -89,15 +86,15 @@ enum states {
    4.38      ALLOC_L3_L2f,
    4.39      WRITE_L2_L3f,
    4.40  
    4.41 -	/* L1 Zero Path */
    4.42 +    /* L1 Zero Path */
    4.43      ALLOC_DATA_L1z,
    4.44      ALLOC_L3_L1z,
    4.45      ALLOC_L2_L1z,
    4.46      WRITE_L1_L1z,
    4.47  
    4.48 -	/* L1 Fault Path */
    4.49 -	READ_L2_L1f,
    4.50 -	READ_L3_L1f,
    4.51 +    /* L1 Fault Path */
    4.52 +    READ_L2_L1f,
    4.53 +    READ_L3_L1f,
    4.54      ALLOC_DATA_L1f,
    4.55      ALLOC_L3_L1f,
    4.56      ALLOC_L2_L1f,
    4.57 @@ -123,9 +120,9 @@ int async_read(vdi_t *vdi, u64 vaddr, io
    4.58      DPRINTF("async_read\n");
    4.59  
    4.60      req = (struct io_req *)malloc(sizeof (struct io_req));
    4.61 -	req->radix[0] = req->radix[1] = req->radix[2] = NULL;
    4.62 +    req->radix[0] = req->radix[1] = req->radix[2] = NULL;
    4.63  
    4.64 -	if (req == NULL) {perror("req was NULL in async_read"); return(-1); }
    4.65 +    if (req == NULL) {perror("req was NULL in async_read"); return(-1); }
    4.66  	
    4.67      req->op    = IO_OP_READ;
    4.68      req->root  = vdi->radix_root;
    4.69 @@ -135,7 +132,7 @@ int async_read(vdi_t *vdi, u64 vaddr, io
    4.70      req->param = param;
    4.71      req->state = READ_LOCKED;
    4.72  
    4.73 -	block_rlock(req->lock, L1_IDX(vaddr), read_cb, req);
    4.74 +    block_rlock(req->lock, L1_IDX(vaddr), read_cb, req);
    4.75  	
    4.76      return 0;
    4.77  }
    4.78 @@ -148,10 +145,9 @@ int   async_write(vdi_t *vdi, u64 vaddr,
    4.79  
    4.80  
    4.81      req = (struct io_req *)malloc(sizeof (struct io_req));
    4.82 -	req->radix[0] = req->radix[1] = req->radix[2] = NULL;
    4.83 -    //DPRINTF("async_write\n");
    4.84 +    req->radix[0] = req->radix[1] = req->radix[2] = NULL;
    4.85      
    4.86 -	if (req == NULL) {perror("req was NULL in async_write"); return(-1); }
    4.87 +    if (req == NULL) {perror("req was NULL in async_write"); return(-1); }
    4.88  
    4.89      req->op    = IO_OP_WRITE;
    4.90      req->root  = vdi->radix_root;
    4.91 @@ -163,10 +159,10 @@ int   async_write(vdi_t *vdi, u64 vaddr,
    4.92      req->radix_addr[L1] = getid(req->root); /* for consistency */
    4.93      req->state = WRITE_LOCKED;
    4.94  
    4.95 -	block_wlock(req->lock, L1_IDX(vaddr), write_cb, req);
    4.96 +    block_wlock(req->lock, L1_IDX(vaddr), write_cb, req);
    4.97  
    4.98  
    4.99 -	return 0;
   4.100 +    return 0;
   4.101  }
   4.102  
   4.103  void read_cb(struct io_ret ret, void *param)
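
async_read takes the row's read lock and async_write its write lock, both keyed on L1_IDX(vaddr), so requests touching the same top-level subtree serialize against writers while reads proceed concurrently. One caveat visible above: both functions clear req->radix[] before testing req against NULL, so the NULL check can never fire usefully; the malloc result should be tested first. A minimal caller sketch, with a hypothetical callback name:

    /* Sketch: issue an asynchronous VDI read, consume it in the callback. */
    static void my_read_done(struct io_ret r, void *param)
    {
        char *block = IO_BLOCK(r);   /* block data, or NULL on error */
        if (block != NULL) {
            /* ... use the block ... */
            free(block);
        }
    }

    void read_one(vdi_t *vdi, u64 vblock)
    {
        async_read(vdi, vblock, my_read_done, NULL);
    }
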
   4.104 @@ -197,11 +193,11 @@ void read_cb(struct io_ret ret, void *pa
   4.105          idx  = getid( node[L1_IDX(req->vaddr)] );
   4.106          free(block);
   4.107          if ( idx == ZERO ) {
   4.108 -        	req->state = RETURN_ZERO;
   4.109 -        	block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);
   4.110 +            req->state = RETURN_ZERO;
   4.111 +            block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);
   4.112          } else {
   4.113 -	        req->state = READ_L2;
   4.114 -	        block_read(idx, read_cb, req);
   4.115 +            req->state = READ_L2;
   4.116 +            block_read(idx, read_cb, req);
   4.117          }
   4.118          break;
   4.119  
   4.120 @@ -214,11 +210,11 @@ void read_cb(struct io_ret ret, void *pa
   4.121          idx  = getid( node[L2_IDX(req->vaddr)] );
   4.122          free(block);
   4.123          if ( idx == ZERO ) {
   4.124 -        	req->state = RETURN_ZERO;
   4.125 -        	block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);
   4.126 +            req->state = RETURN_ZERO;
   4.127 +            block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);
   4.128          } else {
   4.129 -	        req->state = READ_L3;
   4.130 -	        block_read(idx, read_cb, req);
   4.131 +            req->state = READ_L3;
   4.132 +            block_read(idx, read_cb, req);
   4.133          }
   4.134          break;
   4.135  
   4.136 @@ -231,11 +227,11 @@ void read_cb(struct io_ret ret, void *pa
   4.137          idx  = getid( node[L3_IDX(req->vaddr)] );
   4.138          free(block);
   4.139          if ( idx == ZERO )  {
   4.140 -        	req->state = RETURN_ZERO;
   4.141 -        	block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);
   4.142 +            req->state = RETURN_ZERO;
   4.143 +            block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);
   4.144          } else {
   4.145 -	        req->state = READ_DATA;
   4.146 -	        block_read(idx, read_cb, req);
   4.147 +            req->state = READ_DATA;
   4.148 +            block_read(idx, read_cb, req);
   4.149          }
   4.150          break;
   4.151  
   4.152 @@ -249,9 +245,9 @@ void read_cb(struct io_ret ret, void *pa
   4.153          break;
   4.154          
   4.155      case READ_UNLOCKED:
   4.156 -	{
   4.157 -		struct io_ret r;
   4.158 -		io_cb_t cb;
   4.159 +    {
   4.160 +        struct io_ret r;
   4.161 +        io_cb_t cb;
   4.162          DPRINTF("READ_UNLOCKED\n");
   4.163          req_param = req->param;
   4.164          r         = req->retval;
   4.165 @@ -262,18 +258,18 @@ void read_cb(struct io_ret ret, void *pa
   4.166      }
   4.167      
   4.168      case RETURN_ZERO:
   4.169 -	{
   4.170 -		struct io_ret r;
   4.171 -		io_cb_t cb;
   4.172 -	    DPRINTF("RETURN_ZERO\n");
   4.173 -	    req_param = req->param;
   4.174 +    {
   4.175 +        struct io_ret r;
   4.176 +        io_cb_t cb;
   4.177 +        DPRINTF("RETURN_ZERO\n");
   4.178 +        req_param = req->param;
   4.179          cb        = req->cb;
   4.180 -	    free(req);
   4.181 +        free(req);
   4.182          r.type = IO_BLOCK_T;
   4.183          r.u.b = newblock();
   4.184 -	    cb(r, req_param);
   4.185 -	    break;
   4.186 -	}
   4.187 +        cb(r, req_param);
   4.188 +        break;
   4.189 +    }
   4.190          
   4.191      default:
    4.192      	DPRINTF("*** Read: Bad state! (%d) ***\n", req->state);
   4.193 @@ -283,16 +279,16 @@ void read_cb(struct io_ret ret, void *pa
   4.194      return;
   4.195  
   4.196   fail:
   4.197 -	{
   4.198 -		struct io_ret r;
   4.199 -		io_cb_t cb;
   4.200 -		DPRINTF("asyn_read had a read error.\n");
   4.201 +    {
   4.202 +        struct io_ret r;
   4.203 +        io_cb_t cb;
    4.204 +        DPRINTF("async_read had a read error.\n");
   4.205          req_param = req->param;
   4.206          r         = ret;
   4.207          cb        = req->cb;
   4.208          free(req);
   4.209          cb(r, req_param);
   4.210 -	}
   4.211 +    }
   4.212  
   4.213  
   4.214  }
   4.215 @@ -304,11 +300,10 @@ void write_cb(struct io_ret r, void *par
   4.216      u64 a, addr;
   4.217      void *req_param;
   4.218  
   4.219 -    //DPRINTF("write_cb\n");
   4.220      switch(req->state) {
   4.221      	
   4.222      case WRITE_LOCKED:
   4.223 -    
   4.224 +        
   4.225          DPRINTF("WRITE_LOCKED (%llu)\n", L1_IDX(req->vaddr));
   4.226      	req->state = READ_L1;
   4.227      	block_read(getid(req->root), write_cb, req); 
   4.228 @@ -326,9 +321,9 @@ void write_cb(struct io_ret r, void *par
   4.229          req->radix[L1] = node;
   4.230  
   4.231          if ( addr == ZERO ) {
   4.232 -        	/* L1 empty subtree: */
   4.233 -        	req->state = ALLOC_DATA_L1z;
   4.234 -        	block_alloc( req->block, write_cb, req );
   4.235 +            /* L1 empty subtree: */
   4.236 +            req->state = ALLOC_DATA_L1z;
   4.237 +            block_alloc( req->block, write_cb, req );
   4.238          } else if ( !iswritable(a) ) {
   4.239              /* L1 fault: */
   4.240              req->state = READ_L2_L1f;
   4.241 @@ -351,7 +346,7 @@ void write_cb(struct io_ret r, void *par
   4.242          req->radix[L2] = node;
   4.243  
   4.244          if ( addr == ZERO ) {
   4.245 -        	/* L2 empty subtree: */
   4.246 +            /* L2 empty subtree: */
   4.247              req->state = ALLOC_DATA_L2z;
   4.248              block_alloc( req->block, write_cb, req );
   4.249          } else if ( !iswritable(a) ) {
   4.250 @@ -447,7 +442,7 @@ void write_cb(struct io_ret r, void *par
   4.251          addr = getid(a);
   4.252  
   4.253          req->radix[L3] = node;
   4.254 -		req->state = ALLOC_DATA_L2f;
   4.255 +        req->state = ALLOC_DATA_L2f;
   4.256          block_alloc( req->block, write_cb, req );
   4.257          break;
   4.258                  
   4.259 @@ -520,14 +515,14 @@ void write_cb(struct io_ret r, void *par
   4.260          req->radix[L2] = node;
   4.261          
   4.262          if (addr == ZERO) {
   4.263 -        	/* nothing below L2, create an empty L3 and alloc data. */
   4.264 -        	/* (So skip READ_L3_L1f.) */
   4.265 -        	req->radix[L3] = newblock();
   4.266 -        	req->state = ALLOC_DATA_L1f;
   4.267 -        	block_alloc( req->block, write_cb, req );
   4.268 +            /* nothing below L2, create an empty L3 and alloc data. */
   4.269 +            /* (So skip READ_L3_L1f.) */
   4.270 +            req->radix[L3] = newblock();
   4.271 +            req->state = ALLOC_DATA_L1f;
   4.272 +            block_alloc( req->block, write_cb, req );
   4.273          } else {
   4.274 -			req->state = READ_L3_L1f;
   4.275 -			block_read( addr, write_cb, req );
   4.276 +            req->state = READ_L3_L1f;
   4.277 +            block_read( addr, write_cb, req );
   4.278          }
   4.279          break;
   4.280          
   4.281 @@ -541,7 +536,7 @@ void write_cb(struct io_ret r, void *par
   4.282          addr = getid(a);
   4.283  
   4.284          req->radix[L3] = node;
   4.285 -		req->state = ALLOC_DATA_L1f;
   4.286 +        req->state = ALLOC_DATA_L1f;
   4.287          block_alloc( req->block, write_cb, req );
   4.288          break;
   4.289                  
   4.290 @@ -587,7 +582,7 @@ void write_cb(struct io_ret r, void *par
   4.291          DPRINTF("DONE\n");
   4.292          /* free any saved node vals. */
   4.293          for (i=0; i<3; i++)
   4.294 -        	if (req->radix[i] != 0) free(req->radix[i]);
   4.295 +            if (req->radix[i] != 0) free(req->radix[i]);
   4.296          req->retval = r;
   4.297          req->state = WRITE_UNLOCKED;
   4.298          block_wunlock(req->lock, L1_IDX(req->vaddr), write_cb, req);
   4.299 @@ -601,7 +596,7 @@ void write_cb(struct io_ret r, void *par
   4.300          req_param = req->param;
   4.301          r         = req->retval;
   4.302          cb        = req->cb;
   4.303 -	    free(req);
   4.304 +        free(req);
   4.305          cb(r, req_param);
   4.306          break;
   4.307      }
   4.308 @@ -614,16 +609,16 @@ void write_cb(struct io_ret r, void *par
   4.309      return;
   4.310      
   4.311   fail:
   4.312 -	{
   4.313 -		struct io_ret r;
   4.314 -		io_cb_t cb;
   4.315 -		DPRINTF("asyn_write had a read error mid-way.\n");
   4.316 +    {
   4.317 +        struct io_ret r;
   4.318 +        io_cb_t cb;
    4.319 +        DPRINTF("async_write had a read error mid-way.\n");
   4.320          req_param = req->param;
   4.321          cb        = req->cb;
   4.322          r.type = IO_INT_T;
   4.323          r.u.i  = -1;
   4.324          free(req);
   4.325          cb(r, req_param);
   4.326 -	}
   4.327 +    }
   4.328  }
   4.329
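
Only L3_IDX survives in the hunks above; by analogy with its 9-bit mask, L1_IDX and L2_IDX presumably select the next two 9-bit fields, giving three 512-entry radix levels. Decomposing a sample virtual block address under that assumption:

    /* Illustrative decomposition; the L1/L2 field positions are assumed. */
    u64 vaddr = 0x12345ULL;                 /* virtual block number    */
    int l1 = (int)((vaddr >> 18) & 0x1ff);  /* 0x000 -> L1 radix slot  */
    int l2 = (int)((vaddr >>  9) & 0x1ff);  /* 0x091 -> L2 radix slot  */
    int l3 = (int)( vaddr        & 0x1ff);  /* 0x145 -> L3 radix slot  */
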