ia64/xen-unstable

view xen/drivers/block/xen_block.c @ 906:f8e22c28741a

bitkeeper revision 1.573 (3fabd10f_-HPTkLPE6J9ARwOzz2XCQ)

tweak bk thing
author smh22@labyrinth.cl.cam.ac.uk
date Fri Nov 07 17:06:23 2003 +0000 (2003-11-07)
parents e50a38309067 a758526e0e84
children 4aba3a48d64f 0a901de56d7c
line source
/*
 * xen_block.c
 *
 * process incoming block io requests from guestos's.
 */

#include <xeno/config.h>
#include <xeno/types.h>
#include <xeno/lib.h>
#include <xeno/sched.h>
#include <xeno/blkdev.h>
#include <xeno/event.h>
#include <hypervisor-ifs/block.h>
#include <hypervisor-ifs/hypervisor-if.h>
#include <asm-i386/io.h>
#include <asm/domain_page.h>
#include <xeno/spinlock.h>
#include <xeno/keyhandler.h>
#include <xeno/interrupt.h>
#include <xeno/vbd.h>
#include <xeno/slab.h>

#if 1
#define DPRINTK(_f, _a...) printk( _f , ## _a )
#else
#define DPRINTK(_f, _a...) ((void)0)
#endif

/*
 * These are rather arbitrary. They are fairly large because adjacent
 * requests pulled from a communication ring are quite likely to end
 * up being part of the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'MAX_PENDING_REQS' IF WRITE SPEEDS SEEM TOO LOW **
 * This will increase the chances of being able to write whole tracks.
 * '64' should be enough to keep us competitive with Linux.
 */
#define MAX_PENDING_REQS 64
#define BATCH_PER_DOMAIN 16

/*
 * Each outstanding request which we've passed to the lower device layers
 * has a 'pending_req' allocated to it. Each buffer_head that completes
 * decrements the pendcnt towards zero. When it hits zero, the specified
 * domain has a response queued for it, with the saved 'id' passed back.
 *
 * We can't allocate pending_req's in order, since they may complete out
 * of order. We therefore maintain an allocation ring. This ring also
 * indicates when enough work has been passed down -- at that point the
 * allocation ring will be empty.
 */
static pending_req_t pending_reqs[MAX_PENDING_REQS];
static unsigned char pending_ring[MAX_PENDING_REQS];
static unsigned int pending_prod, pending_cons;
static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
#define PENDREQ_IDX_INC(_i) ((_i) = ((_i)+1) & (MAX_PENDING_REQS-1))

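/* Slab cache for the buffer_heads we pass down to the block layer, and a
 * count of requests currently outstanding at that layer. */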
static kmem_cache_t *buffer_head_cachep;
static atomic_t nr_pending;

static int __buffer_is_valid(struct task_struct *p,
                             unsigned long buffer,
                             unsigned short size,
                             int writeable_buffer);
static void __lock_buffer(unsigned long buffer,
                          unsigned short size,
                          int writeable_buffer);
static void unlock_buffer(struct task_struct *p,
                          unsigned long buffer,
                          unsigned short size,
                          int writeable_buffer);

static void io_schedule(unsigned long unused);
static int do_block_io_op_domain(struct task_struct *p, int max_to_do);
static void dispatch_rw_block_io(struct task_struct *p, int index);
static void dispatch_probe(struct task_struct *p, int index);
static void dispatch_debug_block_io(struct task_struct *p, int index);
static void make_response(struct task_struct *p, unsigned long id,
                          unsigned short op, unsigned long st);


/******************************************************************
 * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
 */

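/* Domains with requests outstanding on their rings are queued here for
 * servicing by the scheduler tasklet. */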
static struct list_head io_schedule_list;
static spinlock_t io_schedule_list_lock;

static int __on_blkdev_list(struct task_struct *p)
{
    return p->blkdev_list.next != NULL;
}

static void remove_from_blkdev_list(struct task_struct *p)
{
    unsigned long flags;
    if ( !__on_blkdev_list(p) ) return;
    spin_lock_irqsave(&io_schedule_list_lock, flags);
    if ( __on_blkdev_list(p) )
    {
        list_del(&p->blkdev_list);
        p->blkdev_list.next = NULL;
        put_task_struct(p);
    }
    spin_unlock_irqrestore(&io_schedule_list_lock, flags);
}

static void add_to_blkdev_list_tail(struct task_struct *p)
{
    unsigned long flags;
    if ( __on_blkdev_list(p) ) return;
    spin_lock_irqsave(&io_schedule_list_lock, flags);
    if ( !__on_blkdev_list(p) )
    {
        list_add_tail(&p->blkdev_list, &io_schedule_list);
        get_task_struct(p);
    }
    spin_unlock_irqrestore(&io_schedule_list_lock, flags);
}


/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static DECLARE_TASKLET(io_schedule_tasklet, io_schedule, 0);

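/*
 * Pull a batch of requests from each queued domain, requeueing any domain
 * that still has work outstanding, then push the whole batch to the disc.
 */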
static void io_schedule(unsigned long unused)
{
    struct task_struct *p;
    struct list_head *ent;

    /* Queue up a batch of requests. */
    while ( (atomic_read(&nr_pending) < MAX_PENDING_REQS) &&
            !list_empty(&io_schedule_list) )
    {
        ent = io_schedule_list.next;
        p = list_entry(ent, struct task_struct, blkdev_list);
        get_task_struct(p);
        remove_from_blkdev_list(p);
        if ( do_block_io_op_domain(p, BATCH_PER_DOMAIN) )
            add_to_blkdev_list_tail(p);
        put_task_struct(p);
    }

    /* Push the batch through to disc. */
    run_task_queue(&tq_disk);
}

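/*
 * Kick the scheduler tasklet, but only once at least half the pending-request
 * slots are free and there is still a domain waiting to be serviced.
 */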
static void maybe_trigger_io_schedule(void)
{
    /*
     * Needed so that two processes, who together make the following predicate
     * true, don't both read stale values and evaluate the predicate
     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
     */
    smp_mb();

    if ( (atomic_read(&nr_pending) < (MAX_PENDING_REQS/2)) &&
         !list_empty(&io_schedule_list) )
    {
        tasklet_schedule(&io_schedule_tasklet);
    }
}


/******************************************************************
 * COMPLETION CALLBACK -- Called as bh->b_end_io()
 */

static void end_block_io_op(struct buffer_head *bh, int uptodate)
{
    unsigned long flags;
    pending_req_t *pending_req = bh->pending_req;

    /* An error fails the entire request. */
    if ( !uptodate )
    {
        DPRINTK("Buffer not up-to-date at end of operation\n");
        pending_req->status = 2;
    }

    unlock_buffer(pending_req->domain,
                  virt_to_phys(bh->b_data),
                  bh->b_size,
                  (pending_req->operation==READ));

    if ( atomic_dec_and_test(&pending_req->pendcnt) )
    {
        make_response(pending_req->domain, pending_req->id,
                      pending_req->operation, pending_req->status);
        put_task_struct(pending_req->domain);
        spin_lock_irqsave(&pend_prod_lock, flags);
        pending_ring[pending_prod] = pending_req - pending_reqs;
        PENDREQ_IDX_INC(pending_prod);
        spin_unlock_irqrestore(&pend_prod_lock, flags);
        atomic_dec(&nr_pending);
        maybe_trigger_io_schedule();
    }

    kmem_cache_free(buffer_head_cachep, bh);
}

/* ----[ Syscall Interface ]------------------------------------------------*/

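/*
 * Guest entry point: handles ring signalling and reset, ring-address queries,
 * and (for privileged domains only) virtual block device management.
 */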
long do_block_io_op(block_io_op_t *u_block_io_op)
{
    long ret = 0;
    block_io_op_t op;
    struct task_struct *p = current;

    if (copy_from_user(&op, u_block_io_op, sizeof(op)))
        return -EFAULT;

    switch (op.cmd) {

    case BLOCK_IO_OP_SIGNAL:
        /* simply indicates there're reqs outstanding => add current to list */
        add_to_blkdev_list_tail(p);
        maybe_trigger_io_schedule();
        break;

    case BLOCK_IO_OP_RESET:
        /* Avoid a race with the tasklet. */
        remove_from_blkdev_list(p);
        if ( p->blk_req_cons != p->blk_resp_prod )
        {
            /* Interface isn't quiescent. */
            ret = -EINVAL;
        }
        else
        {
            p->blk_req_cons = p->blk_resp_prod = 0;
            ret = 0;
        }
        break;

    case BLOCK_IO_OP_RING_ADDRESS:
        op.u.ring_mfn = virt_to_phys(p->blk_ring_base) >> PAGE_SHIFT;
        ret = copy_to_user(u_block_io_op, &op, sizeof(op)) ? -EFAULT : 0;
        break;

    case BLOCK_IO_OP_VBD_CREATE:
        /* create a new VBD for a given domain; caller must be privileged */
        if(!IS_PRIV(p))
            return -EPERM;
        ret = vbd_create(&op.u.create_info);
        break;

    case BLOCK_IO_OP_VBD_ADD:
        /* add an extent to a VBD; caller must be privileged */
        if(!IS_PRIV(p))
            return -EPERM;
        ret = vbd_add(&op.u.add_info);
        break;

    case BLOCK_IO_OP_VBD_REMOVE:
        /* remove an extent from a VBD; caller must be privileged */
        if(!IS_PRIV(p))
            return -EPERM;
        ret = vbd_remove(&op.u.remove_info);
        break;

    case BLOCK_IO_OP_VBD_DELETE:
        /* delete a VBD; caller must be privileged */
        if(!IS_PRIV(p))
            return -EPERM;
        ret = vbd_delete(&op.u.delete_info);
        break;

    default:
        ret = -ENOSYS;
    }

    return ret;
}


/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */

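/*
 * Check that every frame spanned by [buffer, buffer+size) lies within machine
 * memory, belongs to the requesting domain, and (if we will write into it) is
 * writeable. Called with p->page_lock held.
 */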
static int __buffer_is_valid(struct task_struct *p,
                             unsigned long buffer,
                             unsigned short size,
                             int writeable_buffer)
{
    unsigned long pfn;
    struct pfn_info *page;
    int rc = 0;

    /* A request may span multiple page frames. Each must be checked. */
    for ( pfn = buffer >> PAGE_SHIFT;
          pfn < ((buffer + size + PAGE_SIZE - 1) >> PAGE_SHIFT);
          pfn++ )
    {
        /* Each frame must be within bounds of machine memory. */
        if ( pfn >= max_page )
        {
            DPRINTK("pfn out of range: %08lx\n", pfn);
            goto out;
        }

        page = frame_table + pfn;

        /* Each frame must belong to the requesting domain. */
        if ( (page->flags & PG_domain_mask) != p->domain )
        {
            DPRINTK("bad domain: expected %d, got %ld\n",
                    p->domain, page->flags & PG_domain_mask);
            goto out;
        }

        /* If reading into the frame, the frame must be writeable. */
        if ( writeable_buffer &&
             ((page->flags & PG_type_mask) != PGT_writeable_page) &&
             (page->type_count != 0) )
        {
            DPRINTK("non-writeable page passed for block read\n");
            goto out;
        }
    }

    rc = 1;
 out:
    return rc;
}

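/*
 * Pin each frame of the buffer (type and total reference counts) so the
 * domain cannot reuse the memory while I/O is in flight. Called with the
 * domain's page_lock held.
 */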
static void __lock_buffer(unsigned long buffer,
                          unsigned short size,
                          int writeable_buffer)
{
    unsigned long pfn;
    struct pfn_info *page;

    for ( pfn = buffer >> PAGE_SHIFT;
          pfn < ((buffer + size + PAGE_SIZE - 1) >> PAGE_SHIFT);
          pfn++ )
    {
        page = frame_table + pfn;
        if ( writeable_buffer )
        {
            if ( page->type_count == 0 )
            {
                page->flags &= ~PG_type_mask;
                /* No need for PG_need_flush here. */
                page->flags |= PGT_writeable_page;
            }
            get_page_type(page);
        }
        get_page_tot(page);
    }
}

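/* Drop the references taken by __lock_buffer once I/O on the buffer is done. */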
static void unlock_buffer(struct task_struct *p,
                          unsigned long buffer,
                          unsigned short size,
                          int writeable_buffer)
{
    unsigned long pfn, flags;
    struct pfn_info *page;

    spin_lock_irqsave(&p->page_lock, flags);
    for ( pfn = buffer >> PAGE_SHIFT;
          pfn < ((buffer + size + PAGE_SIZE - 1) >> PAGE_SHIFT);
          pfn++ )
    {
        page = frame_table + pfn;
        if ( writeable_buffer )
            put_page_type(page);
        put_page_tot(page);
    }
    spin_unlock_irqrestore(&p->page_lock, flags);
}

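/*
 * Pull up to max_to_do requests off a domain's request ring and dispatch
 * them. Returns non-zero if we stopped early with requests still pending.
 */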
static int do_block_io_op_domain(struct task_struct *p, int max_to_do)
{
    blk_ring_t *blk_ring = p->blk_ring_base;
    int i, more_to_do = 0;

    /*
     * Take items off the comms ring, taking care not to catch up
     * with the response-producer index.
     */
    for ( i = p->blk_req_cons;
          (i != blk_ring->req_prod) &&
              (((p->blk_resp_prod-i) & (BLK_RING_SIZE-1)) != 1);
          i = BLK_RING_INC(i) )
    {
        if ( (max_to_do-- == 0) ||
             (atomic_read(&nr_pending) == MAX_PENDING_REQS) )
        {
            more_to_do = 1;
            break;
        }

        switch ( blk_ring->ring[i].req.operation )
        {
        case XEN_BLOCK_READ:
        case XEN_BLOCK_WRITE:
            dispatch_rw_block_io(p, i);
            break;

        case XEN_BLOCK_PROBE:
            dispatch_probe(p, i);
            break;

        case XEN_BLOCK_DEBUG:
            dispatch_debug_block_io(p, i);
            break;

        default:
            DPRINTK("error: unknown block io operation [%d]\n",
                    blk_ring->ring[i].req.operation);
            make_response(p, blk_ring->ring[i].req.id,
                          blk_ring->ring[i].req.operation, 1);
            break;
        }
    }

    p->blk_req_cons = i;
    return more_to_do;
}

static void dispatch_debug_block_io(struct task_struct *p, int index)
{
    DPRINTK("dispatch_debug_block_io: unimplemented\n");
}


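/*
 * Handle XEN_BLOCK_PROBE: fill the guest-supplied xen_disk_info_t with the
 * devices this domain may access (plus the raw IDE/SCSI devices if the
 * domain is privileged).
 */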
static void dispatch_probe(struct task_struct *p, int index)
{
    extern void ide_probe_devices(xen_disk_info_t *xdi);
    extern void scsi_probe_devices(xen_disk_info_t *xdi);
    extern void vbd_probe_devices(xen_disk_info_t *xdi, struct task_struct *p);

    blk_ring_t *blk_ring = p->blk_ring_base;
    xen_disk_info_t *xdi;
    unsigned long flags, buffer;
    int rc = 0;

    buffer = blk_ring->ring[index].req.buffer_and_sects[0] & ~0x1FF;

    spin_lock_irqsave(&p->page_lock, flags);
    if ( !__buffer_is_valid(p, buffer, sizeof(xen_disk_info_t), 1) )
    {
        DPRINTK("Bad buffer in dispatch_probe_blk\n");
        spin_unlock_irqrestore(&p->page_lock, flags);
        rc = 1;
        goto out;
    }

    __lock_buffer(buffer, sizeof(xen_disk_info_t), 1);
    spin_unlock_irqrestore(&p->page_lock, flags);

    /*
    ** XXX SMH: all three of the below probe functions /append/ their
    ** info to the xdi array; i.e. they assume that all earlier slots
    ** are correctly filled, and that xdi->count points to the first
    ** free entry in the array. All kinda gross but it'll do for now.
    */
    xdi = map_domain_mem(buffer);
    xdi->count = 0;

    if(IS_PRIV(p)) {
        /* privileged domains always get access to the 'real' devices */
        ide_probe_devices(xdi);
        scsi_probe_devices(xdi);
    }
    vbd_probe_devices(xdi, p);
    unmap_domain_mem(xdi);

    unlock_buffer(p, buffer, sizeof(xen_disk_info_t), 1);

 out:
    make_response(p, blk_ring->ring[index].req.id, XEN_BLOCK_PROBE, rc);
}

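/*
 * Handle XEN_BLOCK_READ/WRITE: validate each scatter/gather segment, translate
 * it through the VBD layer to a physical device, pin the buffer frames, and
 * submit one buffer_head per physical segment to the block layer.
 */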
static void dispatch_rw_block_io(struct task_struct *p, int index)
{
    extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
    blk_ring_t *blk_ring = p->blk_ring_base;
    blk_ring_req_entry_t *req = &blk_ring->ring[index].req;
    struct buffer_head *bh;
    int operation = (req->operation == XEN_BLOCK_WRITE) ? WRITE : READ;
    unsigned short nr_sects;
    unsigned long buffer, flags;
    int i, rc, tot_sects;
    pending_req_t *pending_req;

    /* We map virtual scatter/gather segments to physical segments. */
    int new_segs, nr_psegs = 0;
    phys_seg_t phys_seg[MAX_BLK_SEGS * 2];

    spin_lock_irqsave(&p->page_lock, flags);

    /* Check that number of segments is sane. */
    if ( (req->nr_segments == 0) || (req->nr_segments > MAX_BLK_SEGS) )
    {
        DPRINTK("Bad number of segments in request (%d)\n", req->nr_segments);
        goto bad_descriptor;
    }

    /*
     * Check each address/size pair is sane, and convert into a
     * physical device and block offset. Note that if the offset and size
     * crosses a virtual extent boundary, we may end up with more
     * physical scatter/gather segments than virtual segments.
     */
    for ( i = tot_sects = 0; i < req->nr_segments; i++, tot_sects += nr_sects )
    {
        buffer = req->buffer_and_sects[i] & ~0x1FF;
        nr_sects = req->buffer_and_sects[i] & 0x1FF;

        if ( nr_sects == 0 )
        {
            DPRINTK("zero-sized data request\n");
            goto bad_descriptor;
        }

        if ( !__buffer_is_valid(p, buffer, nr_sects<<9, (operation==READ)) )
        {
            DPRINTK("invalid buffer\n");
            goto bad_descriptor;
        }

        phys_seg[nr_psegs].dev = req->device;
        phys_seg[nr_psegs].sector_number = req->sector_number + tot_sects;
        phys_seg[nr_psegs].buffer = buffer;
        phys_seg[nr_psegs].nr_sects = nr_sects;

        /* Translate the request into the relevant 'physical device' */
        new_segs = 1;
        rc = vbd_translate(&phys_seg[nr_psegs], &new_segs, p, operation);

        /* If it fails we bail (unless the caller is priv => has raw access) */
        if(rc) {
            if(!IS_PRIV(p)) {
                printk("access denied: %s of [%ld,%ld] on dev=%04x\n",
                       operation == READ ? "read" : "write",
                       req->sector_number + tot_sects,
                       req->sector_number + tot_sects + nr_sects,
                       req->device);
                goto bad_descriptor;
            }

            /* SMH: skanky hack; clear any 'partition' info in device */
            phys_seg[nr_psegs].dev = req->device & 0xFFF0;
        }

        nr_psegs += new_segs;
        if ( nr_psegs >= (MAX_BLK_SEGS*2) ) BUG();
    }

    /* Lock pages associated with each buffer head. */
    for ( i = 0; i < nr_psegs; i++ )
        __lock_buffer(phys_seg[i].buffer, phys_seg[i].nr_sects<<9,
                      (operation==READ));
    spin_unlock_irqrestore(&p->page_lock, flags);

    atomic_inc(&nr_pending);
    pending_req = pending_reqs + pending_ring[pending_cons];
    PENDREQ_IDX_INC(pending_cons);
    pending_req->domain = p;
    pending_req->id = req->id;
    pending_req->operation = operation;
    pending_req->status = 0;
    atomic_set(&pending_req->pendcnt, nr_psegs);

    get_task_struct(p);

    /* Now we pass each segment down to the real blkdev layer. */
    for ( i = 0; i < nr_psegs; i++ )
    {
        bh = kmem_cache_alloc(buffer_head_cachep, GFP_KERNEL);
        if ( bh == NULL ) panic("bh is null\n");
        memset (bh, 0, sizeof (struct buffer_head));

        bh->b_size = phys_seg[i].nr_sects << 9;
        bh->b_dev = phys_seg[i].dev;
        bh->b_rsector = phys_seg[i].sector_number;
        bh->b_data = phys_to_virt(phys_seg[i].buffer);
        bh->b_end_io = end_block_io_op;
        bh->pending_req = pending_req;

        if ( operation == WRITE )
        {
            bh->b_state = (1 << BH_JBD) | (1 << BH_Mapped) | (1 << BH_Req) |
                (1 << BH_Dirty) | (1 << BH_Uptodate) | (1 << BH_Write);
        }
        else
        {
            bh->b_state = (1 << BH_Mapped) | (1 << BH_Read);
        }

        /* Dispatch a single request. We'll flush it to disc later. */
        ll_rw_block(operation, 1, &bh);
    }

    return;

 bad_descriptor:
    spin_unlock_irqrestore(&p->page_lock, flags);
    make_response(p, req->id, req->operation, 1);
}


/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */

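/* Queue a response on the domain's ring and notify it via a guest event. */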
static void make_response(struct task_struct *p, unsigned long id,
                          unsigned short op, unsigned long st)
{
    unsigned long cpu_mask, flags;
    int position;
    blk_ring_t *blk_ring;

    /* Place on the response ring for the relevant domain. */
    spin_lock_irqsave(&p->blk_ring_lock, flags);
    blk_ring = p->blk_ring_base;
    position = p->blk_resp_prod;
    blk_ring->ring[position].resp.id = id;
    blk_ring->ring[position].resp.operation = op;
    blk_ring->ring[position].resp.status = st;
    p->blk_resp_prod = blk_ring->resp_prod = BLK_RING_INC(position);
    spin_unlock_irqrestore(&p->blk_ring_lock, flags);

    /* Kick the relevant domain. */
    cpu_mask = mark_guest_event(p, _EVENT_BLKDEV);
    guest_event_notify(cpu_mask);
}

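/* 'b' debug-key handler: dump ring indexes and pending-request counts. */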
static void dump_blockq(u_char key, void *dev_id, struct pt_regs *regs)
{
    struct task_struct *p;
    blk_ring_t *blk_ring;

    printk("Dumping block queue stats: nr_pending = %d (prod=%d,cons=%d)\n",
           atomic_read(&nr_pending), pending_prod, pending_cons);

    p = current->next_task;
    do
    {
        if ( !is_idle_task(p) )
        {
            printk("Domain: %d\n", p->domain);
            blk_ring = p->blk_ring_base;

            printk(" req_prod:%d, req_cons:%d resp_prod:%d/%d on_list=%d\n",
                   blk_ring->req_prod, p->blk_req_cons,
                   blk_ring->resp_prod, p->blk_resp_prod,
                   __on_blkdev_list(p));
        }
        p = p->next_task;
    } while (p != current);
}

/* Start-of-day initialisation for a new domain. */
void init_blkdev_info(struct task_struct *p)
{
    if ( sizeof(*p->blk_ring_base) > PAGE_SIZE ) BUG();
    p->blk_ring_base = (blk_ring_t *)get_free_page(GFP_KERNEL);
    clear_page(p->blk_ring_base);
    SHARE_PFN_WITH_DOMAIN(virt_to_page(p->blk_ring_base), p->domain);
    p->blkdev_list.next = NULL;
}

/* End-of-day teardown for a domain. */
void destroy_blkdev_info(struct task_struct *p)
{
    ASSERT(!__on_blkdev_list(p));
    UNSHARE_PFN(virt_to_page(p->blk_ring_base));
    free_page((unsigned long)p->blk_ring_base);
}

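/* Pull a dying domain off the scheduler list and bar it from re-insertion. */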
void unlink_blkdev_info(struct task_struct *p)
{
    unsigned long flags;

    spin_lock_irqsave(&io_schedule_list_lock, flags);
    if ( __on_blkdev_list(p) )
    {
        list_del(&p->blkdev_list);
        p->blkdev_list.next = (void *)0xdeadbeef; /* prevent reinsertion */
        put_task_struct(p);
    }
    spin_unlock_irqrestore(&io_schedule_list_lock, flags);
}

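/* Global start-of-day initialisation for the block-I/O subsystem. */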
void initialize_block_io ()
{
    int i;

    atomic_set(&nr_pending, 0);
    pending_prod = pending_cons = 0;
    memset(pending_reqs, 0, sizeof(pending_reqs));
    for ( i = 0; i < MAX_PENDING_REQS; i++ ) pending_ring[i] = i;

    spin_lock_init(&io_schedule_list_lock);
    INIT_LIST_HEAD(&io_schedule_list);

    buffer_head_cachep = kmem_cache_create(
        "buffer_head_cache", sizeof(struct buffer_head),
        0, SLAB_HWCACHE_ALIGN, NULL, NULL);

    add_key_handler('b', dump_blockq, "dump xen ide blkdev statistics");
}
712 }