ia64/xen-unstable

changeset 3349:fb96571465e9

bitkeeper revision 1.1159.207.3 (41d40e9b3_4vSbRgi7pJ9tnNxCToeQ)

New generic I/O ring macros from Andrew Warfield and Tim Deegan.
Currently only used for block-device channels.
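
A rough sketch of the intended usage, using the macro names introduced in
ring.h below (the shared page and the request_t/response_t message types are
illustrative, as in ring.h's own usage comment):

    /* In a shared header: declare the ring parameters and datatypes. */
    #define MY_RING RING_PARAMS(request_t, response_t, PAGE_SIZE)
    DEFINE_RING_TYPES(mytag, MY_RING);

    /* Front end: attach to a shared page and queue one request. */
    mytag_sring_t      *sring = (mytag_sring_t *)shared_page; /* illustrative */
    mytag_front_ring_t  front;

    SHARED_RING_INIT(MY_RING, sring);
    FRONT_RING_INIT(MY_RING, &front, sring);

    if ( !RING_FULL(MY_RING, &front) )
    {
        request_t *req = RING_GET_REQUEST(MY_RING, &front, front.req_prod_pvt);
        /* ... fill in *req ... */
        front.req_prod_pvt++;
        RING_PUSH_REQUESTS(MY_RING, &front); /* wmb(); publish req_prod */
    }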
author kaf24@scramble.cl.cam.ac.uk
date Thu Dec 30 14:20:11 2004 +0000 (2004-12-30)
parents a026414250bd
children cdbaaaab2ca8
files .rootkeys linux-2.6.10-xen-sparse/drivers/xen/blkback/blkback.c linux-2.6.10-xen-sparse/drivers/xen/blkback/common.h linux-2.6.10-xen-sparse/drivers/xen/blkback/interface.c linux-2.6.10-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6.10-xen-sparse/drivers/xen/blkfront/block.h xen/include/public/io/blkif.h xen/include/public/io/ring.h
line diff
     1.1 --- a/.rootkeys	Thu Dec 30 12:01:47 2004 +0000
     1.2 +++ b/.rootkeys	Thu Dec 30 14:20:11 2004 +0000
     1.3 @@ -860,6 +860,7 @@ 40f5623bqoi4GEoBiiUc6TZk1HjsMg xen/inclu
     1.4  40dc4076pVeE1kEEWzcUaNZin65kCA xen/include/public/io/domain_controller.h
     1.5  41c0c412FLc0gunlJl91qMYscFtXVA xen/include/public/io/ioreq.h
     1.6  40f5623cTZ80EwjWUBlh44A9F9i_Lg xen/include/public/io/netif.h
     1.7 +41d40e9b8zCk5VDqhVbuQyhc7G3lqA xen/include/public/io/ring.h
     1.8  4051db79512nOCGweabrFWO2M2h5ng xen/include/public/physdev.h
     1.9  40589968wmhPmV5-ENbBYmMjnedgKw xen/include/public/sched_ctl.h
    1.10  404f3d2eR2Owk-ZcGOx9ULGHg3nrww xen/include/public/trace.h
     2.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/blkback/blkback.c	Thu Dec 30 12:01:47 2004 +0000
     2.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/blkback/blkback.c	Thu Dec 30 14:20:11 2004 +0000
     2.3 @@ -74,12 +74,11 @@ static kmem_cache_t *buffer_head_cachep;
     2.4   * If the tap driver is used, we may get pages belonging to either the tap
     2.5   * or (more likely) the real frontend.  The backend must specify which domain
     2.6   * a given page belongs to in update_va_mapping though.  For the moment, 
     2.7 - * we pass in the domid of the real frontend in PROBE messages and store 
     2.8 - * this value in alt_dom.  Then on mapping, we try both.  This is a Guiness 
     2.9 - * book of records-calibre grim hack, and represents a bit of a security risk.
    2.10 - * Grant tables will soon solve the problem though!
     2.11 + * the tap rewrites the ID field of the request to contain the request index
     2.12 + * and the ID of the real frontend domain.
    2.13   */
    2.14 -static domid_t alt_dom = 0;
    2.15 +#define BLKTAP_COOKIE 0xbeadfeed
    2.16 +static inline domid_t ID_TO_DOM(unsigned long id) { return (id >> 16); }
    2.17  #endif
    2.18  
    2.19  static int do_block_io_op(blkif_t *blkif, int max_to_do);
    2.20 @@ -279,17 +278,16 @@ irqreturn_t blkif_be_int(int irq, void *
    2.21  
    2.22  static int do_block_io_op(blkif_t *blkif, int max_to_do)
    2.23  {
    2.24 -    blkif_ring_t *blk_ring = blkif->blk_ring_base;
    2.25 +    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
    2.26      blkif_request_t *req;
    2.27 -    BLKIF_RING_IDX i, rp;
    2.28 +    RING_IDX i, rp;
    2.29      int more_to_do = 0;
    2.30  
    2.31 -    rp = blk_ring->req_prod;
    2.32 +    rp = blk_ring->sring->req_prod;
    2.33      rmb(); /* Ensure we see queued requests up to 'rp'. */
    2.34  
    2.35 -    /* Take items off the comms ring, taking care not to overflow. */
    2.36 -    for ( i = blkif->blk_req_cons; 
    2.37 -          (i != rp) && ((i-blkif->blk_resp_prod) != BLKIF_RING_SIZE);
    2.38 +    for ( i = blk_ring->req_cons; 
    2.39 +         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(BLKIF_RING, blk_ring, i);
    2.40            i++ )
    2.41      {
    2.42          if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
    2.43 @@ -298,7 +296,7 @@ static int do_block_io_op(blkif_t *blkif
    2.44              break;
    2.45          }
    2.46          
    2.47 -        req = &blk_ring->ring[MASK_BLKIF_IDX(i)].req;
    2.48 +        req = RING_GET_REQUEST(BLKIF_RING, blk_ring, i);
    2.49          switch ( req->operation )
    2.50          {
    2.51          case BLKIF_OP_READ:
    2.52 @@ -312,14 +310,13 @@ static int do_block_io_op(blkif_t *blkif
    2.53  
    2.54          default:
    2.55              DPRINTK("error: unknown block io operation [%d]\n",
    2.56 -                    blk_ring->ring[i].req.operation);
    2.57 -            make_response(blkif, blk_ring->ring[i].req.id, 
    2.58 -                          blk_ring->ring[i].req.operation, BLKIF_RSP_ERROR);
    2.59 +                    req->operation);
    2.60 +            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
    2.61              break;
    2.62          }
    2.63      }
    2.64  
    2.65 -    blkif->blk_req_cons = i;
    2.66 +    blk_ring->req_cons = i;
    2.67      return more_to_do;
    2.68  }
    2.69  
    2.70 @@ -339,24 +336,26 @@ static void dispatch_probe(blkif_t *blki
    2.71  
    2.72  #ifdef CONFIG_XEN_BLKDEV_TAP_BE
    2.73      /* Grab the real frontend out of the probe message. */
    2.74 -    alt_dom = (domid_t)req->frame_and_sects[1];
    2.75 +    if (req->frame_and_sects[1] == BLKTAP_COOKIE) 
    2.76 +        blkif->is_blktap = 1;
    2.77  #endif
    2.78 -    
    2.79 +
    2.80 +
    2.81 +#ifdef CONFIG_XEN_BLKDEV_TAP_BE
    2.82      if ( HYPERVISOR_update_va_mapping_otherdomain(
    2.83          MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
    2.84          (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
    2.85 -        0, blkif->domid) ) {
    2.86 -#ifdef CONFIG_XEN_BLKDEV_TAP_BE
    2.87 -        /* That didn't work.  Try alt_dom. */
    2.88 -        if ( HYPERVISOR_update_va_mapping_otherdomain(
    2.89 -            MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
    2.90 -            (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
    2.91 -            0, alt_dom) )
    2.92 -            goto out;
    2.93 -#else  
    2.94 +        0, (blkif->is_blktap ? ID_TO_DOM(req->id) : blkif->domid) ) )
    2.95 +        
    2.96 +        goto out;
    2.97 +#else
    2.98 +    if ( HYPERVISOR_update_va_mapping_otherdomain(
    2.99 +        MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
   2.100 +        (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
   2.101 +        0, blkif->domid) ) 
   2.102 +        
   2.103          goto out;
   2.104  #endif
   2.105 -    }
   2.106      
   2.107      rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0), 
   2.108                      PAGE_SIZE / sizeof(vdisk_t));
   2.109 @@ -441,7 +440,7 @@ static void dispatch_rw_block_io(blkif_t
   2.110          mcl[i].args[1] = (phys_seg[i].buffer & PAGE_MASK) | remap_prot;
   2.111          mcl[i].args[2] = 0;
   2.112  #ifdef CONFIG_XEN_BLKDEV_TAP_BE
   2.113 -        mcl[i].args[3] = (alt_dom != 0) ? alt_dom : blkif->domid;
   2.114 +        mcl[i].args[3] = (blkif->is_blktap) ? ID_TO_DOM(req->id) : blkif->domid;
   2.115  #else
   2.116          mcl[i].args[3] = blkif->domid;
   2.117  #endif
   2.118 @@ -558,16 +557,17 @@ static void make_response(blkif_t *blkif
   2.119  {
   2.120      blkif_response_t *resp;
   2.121      unsigned long     flags;
   2.122 +    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
   2.123  
   2.124      /* Place on the response ring for the relevant domain. */ 
   2.125      spin_lock_irqsave(&blkif->blk_ring_lock, flags);
   2.126 -    resp = &blkif->blk_ring_base->
   2.127 -        ring[MASK_BLKIF_IDX(blkif->blk_resp_prod)].resp;
   2.128 +    resp = RING_GET_RESPONSE(BLKIF_RING, blk_ring, blk_ring->rsp_prod_pvt);
   2.129      resp->id        = id;
   2.130      resp->operation = op;
   2.131      resp->status    = st;
   2.132      wmb(); /* Ensure other side can see the response fields. */
   2.133 -    blkif->blk_ring_base->resp_prod = ++blkif->blk_resp_prod;
   2.134 +    blk_ring->rsp_prod_pvt++;
   2.135 +    RING_PUSH_RESPONSES(BLKIF_RING, blk_ring);
   2.136      spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
   2.137  
   2.138      /* Kick the relevant domain. */
     3.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/blkback/common.h	Thu Dec 30 12:01:47 2004 +0000
     3.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/blkback/common.h	Thu Dec 30 14:20:11 2004 +0000
     3.3 @@ -15,6 +15,7 @@
     3.4  #include <asm-xen/ctrl_if.h>
     3.5  #include <asm-xen/hypervisor.h>
     3.6  #include <asm-xen/xen-public/io/blkif.h>
     3.7 +#include <asm-xen/xen-public/io/ring.h>
     3.8  
     3.9  #if 0
    3.10  #define ASSERT(_p) \
    3.11 @@ -36,19 +37,17 @@ struct block_device;
    3.12  
    3.13  typedef struct blkif_st {
    3.14      /* Unique identifier for this interface. */
    3.15 -    domid_t          domid;
    3.16 -    unsigned int     handle;
    3.17 +    domid_t           domid;
    3.18 +    unsigned int      handle;
    3.19      /* Physical parameters of the comms window. */
    3.20 -    unsigned long    shmem_frame;
    3.21 -    unsigned int     evtchn;
    3.22 -    int              irq;
    3.23 +    unsigned long     shmem_frame;
    3.24 +    unsigned int      evtchn;
    3.25 +    int               irq;
    3.26      /* Comms information. */
    3.27 -    blkif_ring_t    *blk_ring_base; /* ioremap()'ed ptr to shmem_frame. */
    3.28 -    BLKIF_RING_IDX     blk_req_cons;  /* Request consumer. */
    3.29 -    BLKIF_RING_IDX     blk_resp_prod; /* Private version of resp. producer. */
    3.30 +    blkif_back_ring_t blk_ring;
    3.31      /* VBDs attached to this interface. */
    3.32 -    rb_root_t        vbd_rb;        /* Mapping from 16-bit vdevices to VBDs. */
    3.33 -    spinlock_t       vbd_lock;      /* Protects VBD mapping. */
    3.34 +    rb_root_t         vbd_rb;        /* Mapping from 16-bit vdevices to VBDs.*/
    3.35 +    spinlock_t        vbd_lock;      /* Protects VBD mapping. */
    3.36      /* Private fields. */
    3.37      enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
    3.38      /*
    3.39 @@ -56,6 +55,10 @@ typedef struct blkif_st {
    3.40       * We therefore need to store the id from the original request.
    3.41       */
    3.42      u8               disconnect_rspid;
    3.43 +#ifdef CONFIG_XEN_BLKDEV_TAP_BE
    3.44 +    /* Is this a blktap frontend */
    3.45 +    unsigned int     is_blktap;
    3.46 +#endif
    3.47      struct blkif_st *hash_next;
    3.48      struct list_head blkdev_list;
    3.49      spinlock_t       blk_ring_lock;
     4.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/blkback/interface.c	Thu Dec 30 12:01:47 2004 +0000
     4.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/blkback/interface.c	Thu Dec 30 14:20:11 2004 +0000
     4.3 @@ -39,7 +39,7 @@ static void __blkif_disconnect_complete(
     4.4       * must still be notified to the remote driver.
     4.5       */
     4.6      unbind_evtchn_from_irq(blkif->evtchn);
     4.7 -    vfree(blkif->blk_ring_base);
     4.8 +    vfree(blkif->blk_ring.sring);
     4.9  
    4.10      /* Construct the deferred response message. */
    4.11      cmsg.type         = CMSG_BLKIF_BE;
    4.12 @@ -149,14 +149,15 @@ void blkif_destroy(blkif_be_destroy_t *d
    4.13  
    4.14  void blkif_connect(blkif_be_connect_t *connect)
    4.15  {
    4.16 -    domid_t       domid  = connect->domid;
    4.17 -    unsigned int  handle = connect->blkif_handle;
    4.18 -    unsigned int  evtchn = connect->evtchn;
    4.19 -    unsigned long shmem_frame = connect->shmem_frame;
    4.20 +    domid_t        domid  = connect->domid;
    4.21 +    unsigned int   handle = connect->blkif_handle;
    4.22 +    unsigned int   evtchn = connect->evtchn;
    4.23 +    unsigned long  shmem_frame = connect->shmem_frame;
    4.24      struct vm_struct *vma;
    4.25 -    pgprot_t      prot;
    4.26 -    int           error;
    4.27 -    blkif_t      *blkif;
    4.28 +    pgprot_t       prot;
    4.29 +    int            error;
    4.30 +    blkif_t       *blkif;
    4.31 +    blkif_sring_t *sring;
    4.32  
    4.33      blkif = blkif_find_by_handle(domid, handle);
    4.34      if ( unlikely(blkif == NULL) )
    4.35 @@ -195,11 +196,13 @@ void blkif_connect(blkif_be_connect_t *c
    4.36          vfree(vma->addr);
    4.37          return;
    4.38      }
    4.39 -
    4.40 +    sring = (blkif_sring_t *)vma->addr;
    4.41 +    SHARED_RING_INIT(BLKIF_RING, sring);
    4.42 +    BACK_RING_INIT(BLKIF_RING, &blkif->blk_ring, sring);
    4.43 +    
    4.44      blkif->evtchn        = evtchn;
    4.45      blkif->irq           = bind_evtchn_to_irq(evtchn);
    4.46      blkif->shmem_frame   = shmem_frame;
    4.47 -    blkif->blk_ring_base = (blkif_ring_t *)vma->addr;
    4.48      blkif->status        = CONNECTED;
    4.49      blkif_get(blkif);
    4.50  
     5.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/blkfront/blkfront.c	Thu Dec 30 12:01:47 2004 +0000
     5.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/blkfront/blkfront.c	Thu Dec 30 14:20:11 2004 +0000
     5.3 @@ -6,6 +6,7 @@
     5.4   * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
     5.5   * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
     5.6   * Copyright (c) 2004, Christian Limpach
     5.7 + * Copyright (c) 2004, Andrew Warfield
     5.8   * 
     5.9   * This file may be distributed separately from the Linux kernel, or
    5.10   * incorporated into other software packages, subject to the following license:
    5.11 @@ -84,20 +85,14 @@ static unsigned int blkif_irq = 0;
    5.12  static int blkif_control_rsp_valid;
    5.13  static blkif_response_t blkif_control_rsp;
    5.14  
    5.15 -static blkif_ring_t *blk_ring = NULL;
    5.16 -static BLKIF_RING_IDX resp_cons; /* Response consumer for comms ring. */
    5.17 -static BLKIF_RING_IDX req_prod;  /* Private request producer.         */
    5.18 +static blkif_front_ring_t blk_ring;
    5.19  
    5.20  unsigned long rec_ring_free;
    5.21 -blkif_request_t rec_ring[BLKIF_RING_SIZE];
    5.22 +blkif_request_t rec_ring[RING_SIZE(BLKIF_RING, &blk_ring)];
    5.23  
    5.24  static int recovery = 0;           /* "Recovery in progress" flag.  Protected
    5.25                                      * by the blkif_io_lock */
    5.26  
    5.27 -/* We plug the I/O ring if the driver is suspended or if the ring is full. */
    5.28 -#define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
    5.29 -                         (blkif_state != BLKIF_STATE_CONNECTED))
    5.30 -
    5.31  static void kick_pending_request_queues(void);
    5.32  
    5.33  int __init xlblk_init(void);
    5.34 @@ -108,7 +103,7 @@ static inline int GET_ID_FROM_FREELIST( 
    5.35  {
    5.36      unsigned long free = rec_ring_free;
    5.37  
    5.38 -    if ( free > BLKIF_RING_SIZE )
    5.39 +    if ( free > RING_SIZE(BLKIF_RING, &blk_ring) )
    5.40          BUG();
    5.41  
    5.42      rec_ring_free = rec_ring[free].id;
    5.43 @@ -169,8 +164,7 @@ static inline void translate_req_to_mfn(
    5.44  static inline void flush_requests(void)
    5.45  {
    5.46      DISABLE_SCATTERGATHER();
    5.47 -    wmb(); /* Ensure that the frontend can see the requests. */
    5.48 -    blk_ring->req_prod = req_prod;
    5.49 +    RING_PUSH_REQUESTS(BLKIF_RING, &blk_ring);
    5.50      notify_via_evtchn(blkif_evtchn);
    5.51  }
    5.52  
    5.53 @@ -343,7 +337,7 @@ static int blkif_queue_request(struct re
    5.54          return 1;
    5.55  
    5.56      /* Fill out a communications ring structure. */
    5.57 -    ring_req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
    5.58 +    ring_req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, blk_ring.req_prod_pvt);
    5.59      id = GET_ID_FROM_FREELIST();
    5.60      rec_ring[id].id = (unsigned long) req;
    5.61  
    5.62 @@ -372,8 +366,8 @@ static int blkif_queue_request(struct re
    5.63          }
    5.64      }
    5.65  
    5.66 -    req_prod++;
    5.67 -
    5.68 +    blk_ring.req_prod_pvt++;
    5.69 +    
    5.70      /* Keep a private copy so we can reissue requests when recovering. */
    5.71      translate_req_to_pfn( &rec_ring[id], ring_req);
    5.72  
    5.73 @@ -400,7 +394,7 @@ void do_blkif_request(request_queue_t *r
    5.74              continue;
    5.75          }
    5.76  
    5.77 -        if ( BLKIF_RING_FULL )
    5.78 +        if ( RING_FULL(BLKIF_RING, &blk_ring) )
    5.79          {
    5.80              blk_stop_queue(rq);
    5.81              break;
    5.82 @@ -426,9 +420,9 @@ static irqreturn_t blkif_int(int irq, vo
    5.83  {
    5.84      struct request *req;
    5.85      blkif_response_t *bret;
    5.86 -    BLKIF_RING_IDX i, rp;
    5.87 +    RING_IDX i, rp;
    5.88      unsigned long flags; 
    5.89 -
    5.90 +    
    5.91      spin_lock_irqsave(&blkif_io_lock, flags);     
    5.92  
    5.93      if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) || 
    5.94 @@ -437,18 +431,17 @@ static irqreturn_t blkif_int(int irq, vo
    5.95          spin_unlock_irqrestore(&blkif_io_lock, flags);
    5.96          return IRQ_HANDLED;
    5.97      }
    5.98 -
    5.99 -    rp = blk_ring->resp_prod;
   5.100 +    
   5.101 +    rp = blk_ring.sring->rsp_prod;
   5.102      rmb(); /* Ensure we see queued responses up to 'rp'. */
   5.103  
   5.104 -    for ( i = resp_cons; i != rp; i++ )
   5.105 +    for ( i = blk_ring.rsp_cons; i != rp; i++ )
   5.106      {
   5.107  	unsigned long id;
   5.108 -        bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
   5.109  
   5.110 +        bret = RING_GET_RESPONSE(BLKIF_RING, &blk_ring, i);
   5.111  	id = bret->id;
   5.112  	req = (struct request *)rec_ring[id].id;
   5.113 -
   5.114  	blkif_completion( &rec_ring[id] );
   5.115  
   5.116  	ADD_ID_TO_FREELIST(id); /* overwrites req */
   5.117 @@ -477,9 +470,9 @@ static irqreturn_t blkif_int(int irq, vo
   5.118              BUG();
   5.119          }
   5.120      }
   5.121 +
   5.122 +    blk_ring.rsp_cons = i;
   5.123      
   5.124 -    resp_cons = i;
   5.125 -
   5.126      kick_pending_request_queues();
   5.127  
   5.128      spin_unlock_irqrestore(&blkif_io_lock, flags);
   5.129 @@ -533,10 +526,11 @@ static void kick_pending_request_queues(
   5.130  {
   5.131      /* We kick pending request queues if the ring is reasonably empty. */
   5.132      if ( (nr_pending != 0) && 
   5.133 -         ((req_prod - resp_cons) < (BLKIF_RING_SIZE >> 1)) )
   5.134 +         (RING_PENDING_REQUESTS(BLKIF_RING, &blk_ring) < 
    5.135 +            (RING_SIZE(BLKIF_RING, &blk_ring) >> 1)) )
   5.136      {
   5.137          /* Attempt to drain the queue, but bail if the ring becomes full. */
   5.138 -        while ( (nr_pending != 0) && !BLKIF_RING_FULL )
   5.139 +        while ( (nr_pending != 0) && !RING_FULL(BLKIF_RING, &blk_ring) )
   5.140              do_blkif_request(pending_queues[--nr_pending]);
   5.141      }
   5.142  }
   5.143 @@ -830,8 +824,8 @@ static int blkif_queue_request(unsigned 
   5.144               (sg_dev == device) &&
   5.145               (sg_next_sect == sector_number) )
   5.146          {
   5.147 -
   5.148 -            req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod-1)].req;
   5.149 +            req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, 
    5.150 +                    blk_ring.req_prod_pvt - 1);
   5.151              bh = (struct buffer_head *)id;
   5.152  	    
   5.153              bh->b_reqnext = (struct buffer_head *)rec_ring[req->id].id;
   5.154 @@ -851,7 +845,7 @@ static int blkif_queue_request(unsigned 
   5.155  
   5.156              return 0;
   5.157          }
   5.158 -        else if ( BLKIF_RING_FULL )
   5.159 +        else if ( RING_FULL(BLKIF_RING, &blk_ring) )
   5.160          {
   5.161              return 1;
   5.162          }
   5.163 @@ -868,7 +862,7 @@ static int blkif_queue_request(unsigned 
   5.164      }
   5.165  
   5.166      /* Fill out a communications ring structure. */
   5.167 -    req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
   5.168 +    req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, blk_ring.req_prod_pvt);
   5.169  
   5.170      xid = GET_ID_FROM_FREELIST();
   5.171      rec_ring[xid].id = id;
   5.172 @@ -880,11 +874,11 @@ static int blkif_queue_request(unsigned 
   5.173      req->nr_segments   = 1;
   5.174      req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
   5.175  
   5.176 -    req_prod++;
   5.177 -
   5.178      /* Keep a private copy so we can reissue requests when recovering. */    
   5.179      translate_req_to_pfn(&rec_ring[xid], req );
   5.180  
   5.181 +    blk_ring.req_prod_pvt++;
   5.182 +    
   5.183      return 0;
   5.184  }
   5.185  
   5.186 @@ -973,7 +967,7 @@ void do_blkif_request(request_queue_t *r
   5.187  
   5.188  static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
   5.189  {
   5.190 -    BLKIF_RING_IDX i, rp; 
   5.191 +    RING_IDX i, rp; 
   5.192      unsigned long flags; 
   5.193      struct buffer_head *bh, *next_bh;
   5.194      
   5.195 @@ -985,14 +979,15 @@ static void blkif_int(int irq, void *dev
   5.196          return;
   5.197      }
   5.198  
   5.199 -    rp = blk_ring->resp_prod;
   5.200 +    rp = blk_ring.rsp_prod;
   5.201      rmb(); /* Ensure we see queued responses up to 'rp'. */
   5.202  
   5.203 -    for ( i = resp_cons; i != rp; i++ )
   5.204 +    for ( i = blk_ring.rsp_cons; i != rp; i++ )
   5.205      {
   5.206  	unsigned long id;
   5.207 -        blkif_response_t *bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
   5.208 -
   5.209 +        blkif_response_t *bret;
   5.210 +        
    5.211 +        bret = RING_GET_RESPONSE(BLKIF_RING, &blk_ring, i);
   5.212  	id = bret->id;
   5.213  	bh = (struct buffer_head *)rec_ring[id].id; 
   5.214  
    5.215 @@ -1022,10 +1017,10 @@ static void blkif_int(int irq, void *dev
   5.216          default:
   5.217              BUG();
   5.218          }
    5.219      }
   5.220 +
   5.221 +    blk_ring.rsp_cons = i;
   5.222      
   5.223 -    resp_cons = i;
   5.224 -
   5.225      kick_pending_request_queues();
   5.226  
   5.227      spin_unlock_irqrestore(&io_request_lock, flags);
   5.228 @@ -1039,31 +1033,33 @@ static void blkif_int(int irq, void *dev
   5.229  void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
   5.230  {
   5.231      unsigned long flags, id;
   5.232 +    blkif_request_t *req_d;
   5.233  
   5.234   retry:
   5.235 -    while ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
   5.236 +    while ( RING_FULL(BLKIF_RING, &blk_ring) )
   5.237      {
   5.238          set_current_state(TASK_INTERRUPTIBLE);
   5.239          schedule_timeout(1);
   5.240      }
   5.241  
   5.242      spin_lock_irqsave(&blkif_io_lock, flags);
   5.243 -    if ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
   5.244 +    if ( RING_FULL(BLKIF_RING, &blk_ring) )
   5.245      {
   5.246          spin_unlock_irqrestore(&blkif_io_lock, flags);
   5.247          goto retry;
   5.248      }
   5.249  
   5.250      DISABLE_SCATTERGATHER();
   5.251 -    blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req = *req;    
   5.252 +    req_d = RING_GET_REQUEST(BLKIF_RING, &blk_ring, blk_ring.req_prod_pvt);
   5.253 +    *req_d = *req;    
   5.254  
   5.255      id = GET_ID_FROM_FREELIST();
   5.256 -    blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req.id = id;
   5.257 +    req_d->id = id;
   5.258      rec_ring[id].id = (unsigned long) req;
   5.259  
   5.260      translate_req_to_pfn( &rec_ring[id], req );
   5.261  
   5.262 -    req_prod++;
   5.263 +    blk_ring.req_prod_pvt++;
   5.264      flush_requests();
   5.265  
   5.266      spin_unlock_irqrestore(&blkif_io_lock, flags);
   5.267 @@ -1105,7 +1101,7 @@ static void blkif_send_interface_connect
   5.268      blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
   5.269      
   5.270      msg->handle      = 0;
   5.271 -    msg->shmem_frame = (virt_to_machine(blk_ring) >> PAGE_SHIFT);
   5.272 +    msg->shmem_frame = (virt_to_machine(blk_ring.sring) >> PAGE_SHIFT);
   5.273      
   5.274      ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
   5.275  }
   5.276 @@ -1119,10 +1115,10 @@ static void blkif_free(void)
   5.277      spin_unlock_irq(&blkif_io_lock);
   5.278  
   5.279      /* Free resources associated with old device channel. */
   5.280 -    if ( blk_ring != NULL )
   5.281 +    if ( blk_ring.sring != NULL )
   5.282      {
   5.283 -        free_page((unsigned long)blk_ring);
   5.284 -        blk_ring = NULL;
   5.285 +        free_page((unsigned long)blk_ring.sring);
   5.286 +        blk_ring.sring = NULL;
   5.287      }
   5.288      free_irq(blkif_irq, NULL);
   5.289      blkif_irq = 0;
   5.290 @@ -1138,10 +1134,14 @@ static void blkif_close(void)
   5.291  /* Move from CLOSED to DISCONNECTED state. */
   5.292  static void blkif_disconnect(void)
   5.293  {
   5.294 -    if ( blk_ring != NULL )
   5.295 -        free_page((unsigned long)blk_ring);
   5.296 -    blk_ring = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
   5.297 -    blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
   5.298 +    blkif_sring_t *sring;
   5.299 +    
   5.300 +    if ( blk_ring.sring != NULL )
   5.301 +        free_page((unsigned long)blk_ring.sring);
   5.302 +    
   5.303 +    sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
   5.304 +    SHARED_RING_INIT(BLKIF_RING, sring);
   5.305 +    FRONT_RING_INIT(BLKIF_RING, &blk_ring, sring);
   5.306      blkif_state  = BLKIF_STATE_DISCONNECTED;
   5.307      blkif_send_interface_connect();
   5.308  }
   5.309 @@ -1155,34 +1155,37 @@ static void blkif_reset(void)
   5.310  static void blkif_recover(void)
   5.311  {
   5.312      int i;
   5.313 +    blkif_request_t *req;
   5.314  
   5.315      /* Hmm, requests might be re-ordered when we re-issue them.
   5.316       * This will need to be fixed once we have barriers */
   5.317  
   5.318      /* Stage 1 : Find active and move to safety. */
   5.319 -    for ( i = 0; i < BLKIF_RING_SIZE; i++ )
   5.320 +    for ( i = 0; i < RING_SIZE(BLKIF_RING, &blk_ring); i++ )
   5.321      {
   5.322          if ( rec_ring[i].id >= PAGE_OFFSET )
   5.323          {
   5.324 -            translate_req_to_mfn(
   5.325 -                &blk_ring->ring[req_prod].req, &rec_ring[i]);
   5.326 -            req_prod++;
   5.327 +            req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, 
   5.328 +                    blk_ring.req_prod_pvt);
   5.329 +            translate_req_to_mfn(req, &rec_ring[i]);
   5.330 +            blk_ring.req_prod_pvt++;
   5.331          }
   5.332      }
   5.333  
   5.334      /* Stage 2 : Set up shadow list. */
   5.335 -    for ( i = 0; i < req_prod; i++ ) 
   5.336 +    for ( i = 0; i < blk_ring.req_prod_pvt; i++ ) 
   5.337      {
   5.338 -        rec_ring[i].id = blk_ring->ring[i].req.id;		
   5.339 -        blk_ring->ring[i].req.id = i;
   5.340 -        translate_req_to_pfn(&rec_ring[i], &blk_ring->ring[i].req);
   5.341 +        req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, i);
   5.342 +        rec_ring[i].id = req->id;		
   5.343 +        req->id = i;
   5.344 +        translate_req_to_pfn(&rec_ring[i], req);
   5.345      }
   5.346  
   5.347      /* Stage 3 : Set up free list. */
   5.348 -    for ( ; i < BLKIF_RING_SIZE; i++ )
   5.349 +    for ( ; i < RING_SIZE(BLKIF_RING, &blk_ring); i++ )
   5.350          rec_ring[i].id = i+1;
   5.351 -    rec_ring_free = req_prod;
   5.352 -    rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
   5.353 +    rec_ring_free = blk_ring.req_prod_pvt;
   5.354 +    rec_ring[RING_SIZE(BLKIF_RING, &blk_ring)-1].id = 0x0fffffff;
   5.355  
   5.356      /* blk_ring->req_prod will be set when we flush_requests().*/
   5.357      wmb();
   5.358 @@ -1376,9 +1379,9 @@ int __init xlblk_init(void)
   5.359      printk(KERN_INFO "xen_blk: Initialising virtual block device driver\n");
   5.360  
   5.361      rec_ring_free = 0;
   5.362 -    for ( i = 0; i < BLKIF_RING_SIZE; i++ )
   5.363 +    for ( i = 0; i < RING_SIZE(BLKIF_RING, &blk_ring); i++ )
   5.364  	rec_ring[i].id = i+1;
   5.365 -    rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
   5.366 +    rec_ring[RING_SIZE(BLKIF_RING, &blk_ring)-1].id = 0x0fffffff;
   5.367  
   5.368      (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
   5.369                                      CALLBACK_IN_BLOCKING_CONTEXT);
     6.1 --- a/linux-2.6.10-xen-sparse/drivers/xen/blkfront/block.h	Thu Dec 30 12:01:47 2004 +0000
     6.2 +++ b/linux-2.6.10-xen-sparse/drivers/xen/blkfront/block.h	Thu Dec 30 14:20:11 2004 +0000
     6.3 @@ -46,6 +46,7 @@
     6.4  #include <linux/devfs_fs_kernel.h>
     6.5  #include <asm-xen/xen-public/xen.h>
     6.6  #include <asm-xen/xen-public/io/blkif.h>
     6.7 +#include <asm-xen/xen-public/io/ring.h>
     6.8  #include <asm/io.h>
     6.9  #include <asm/atomic.h>
    6.10  #include <asm/uaccess.h>
     7.1 --- a/xen/include/public/io/blkif.h	Thu Dec 30 12:01:47 2004 +0000
     7.2 +++ b/xen/include/public/io/blkif.h	Thu Dec 30 14:20:11 2004 +0000
     7.3 @@ -9,6 +9,8 @@
     7.4  #ifndef __XEN_PUBLIC_IO_BLKIF_H__
     7.5  #define __XEN_PUBLIC_IO_BLKIF_H__
     7.6  
     7.7 +#include <asm-xen/xen-public/io/ring.h>
     7.8 +
     7.9  #define blkif_vdev_t   u16
    7.10  #define blkif_sector_t u64
    7.11  
    7.12 @@ -52,27 +54,11 @@ typedef struct {
    7.13  #define BLKIF_RSP_OKAY    0 /* non-specific 'okay'  */
    7.14  
    7.15  /*
    7.16 - * We use a special capitalised type name because it is _essential_ that all 
    7.17 - * arithmetic on indexes is done on an integer type of the correct size.
    7.18 - */
    7.19 -typedef u32 BLKIF_RING_IDX;
    7.20 -
    7.21 -/*
    7.22 - * Ring indexes are 'free running'. That is, they are not stored modulo the
    7.23 - * size of the ring buffer. The following macro converts a free-running counter
    7.24 - * into a value that can directly index a ring-buffer array.
    7.25 + * Generate blkif ring structures and types.
    7.26   */
    7.27 -#define MASK_BLKIF_IDX(_i) ((_i)&(BLKIF_RING_SIZE-1))
    7.28  
    7.29 -typedef struct {
    7.30 -    BLKIF_RING_IDX req_prod;  /*  0: Request producer. Updated by front-end. */
    7.31 -    BLKIF_RING_IDX resp_prod; /*  4: Response producer. Updated by back-end. */
    7.32 -    union {                   /*  8 */
    7.33 -        blkif_request_t  req;
    7.34 -        blkif_response_t resp;
    7.35 -    } PACKED ring[BLKIF_RING_SIZE];
    7.36 -} PACKED blkif_ring_t;
    7.37 -
    7.38 +#define BLKIF_RING RING_PARAMS(blkif_request_t, blkif_response_t, PAGE_SIZE)
    7.39 +DEFINE_RING_TYPES(blkif, BLKIF_RING);
    7.40  
    7.41  /*
    7.42   * BLKIF_OP_PROBE:
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/include/public/io/ring.h	Thu Dec 30 14:20:11 2004 +0000
     8.3 @@ -0,0 +1,254 @@
     8.4 +/*
     8.5 + * Shared producer-consumer ring macros.
     8.6 + * Tim Deegan and Andrew Warfield November 2004.
     8.7 + */ 
     8.8 +
     8.9 +#ifndef __XEN_PUBLIC_IO_RING_H__
    8.10 +#define __XEN_PUBLIC_IO_RING_H__
    8.11 +
    8.12 +typedef unsigned int RING_IDX;
    8.13 +
    8.14 +/* This is horrible: it rounds a 32-bit unsigned constant down to the
    8.15 + * nearest power of two, by finding the highest set bit. */
    8.16 +#define __RD2PO2(_x) (((_x) & 0x80000000) ? 0x80000000 :                \
    8.17 +                      ((_x) & 0x40000000) ? 0x40000000 :                \
    8.18 +                      ((_x) & 0x20000000) ? 0x20000000 :                \
    8.19 +                      ((_x) & 0x10000000) ? 0x10000000 :                \
    8.20 +                      ((_x) & 0x08000000) ? 0x08000000 :                \
    8.21 +                      ((_x) & 0x04000000) ? 0x04000000 :                \
    8.22 +                      ((_x) & 0x02000000) ? 0x02000000 :                \
    8.23 +                      ((_x) & 0x01000000) ? 0x01000000 :                \
    8.24 +                      ((_x) & 0x00800000) ? 0x00800000 :                \
    8.25 +                      ((_x) & 0x00400000) ? 0x00400000 :                \
    8.26 +                      ((_x) & 0x00200000) ? 0x00200000 :                \
    8.27 +                      ((_x) & 0x00100000) ? 0x00100000 :                \
    8.28 +                      ((_x) & 0x00080000) ? 0x00080000 :                \
    8.29 +                      ((_x) & 0x00040000) ? 0x00040000 :                \
    8.30 +                      ((_x) & 0x00020000) ? 0x00020000 :                \
    8.31 +                      ((_x) & 0x00010000) ? 0x00010000 :                \
    8.32 +                      ((_x) & 0x00008000) ? 0x00008000 :                \
    8.33 +                      ((_x) & 0x00004000) ? 0x00004000 :                \
    8.34 +                      ((_x) & 0x00002000) ? 0x00002000 :                \
    8.35 +                      ((_x) & 0x00001000) ? 0x00001000 :                \
    8.36 +                      ((_x) & 0x00000800) ? 0x00000800 :                \
    8.37 +                      ((_x) & 0x00000400) ? 0x00000400 :                \
    8.38 +                      ((_x) & 0x00000200) ? 0x00000200 :                \
    8.39 +                      ((_x) & 0x00000100) ? 0x00000100 :                \
    8.40 +                      ((_x) & 0x00000080) ? 0x00000080 :                \
    8.41 +                      ((_x) & 0x00000040) ? 0x00000040 :                \
    8.42 +                      ((_x) & 0x00000020) ? 0x00000020 :                \
    8.43 +                      ((_x) & 0x00000010) ? 0x00000010 :                \
    8.44 +                      ((_x) & 0x00000008) ? 0x00000008 :                \
    8.45 +                      ((_x) & 0x00000004) ? 0x00000004 :                \
    8.46 +                      ((_x) & 0x00000002) ? 0x00000002 :                \
    8.47 +                      ((_x) & 0x00000001) ? 0x00000001 : 0x00000000)
    8.48 +
    8.49 +/* Given a shared ring, tell me how many entries there are in it.  The
    8.50 + * rule is: a ring contains as many entries as will fit, rounded down to
    8.51 + * the nearest power of two (so we can mask with (size-1) to loop
    8.52 + * around) */
    8.53 +#define __SRING_SIZE(__params, __esize)                                 \
    8.54 +    __RD2PO2((sizeof((__params)->size) - (2 * sizeof(RING_IDX))) / (__esize))
    8.55 +#define SRING_SIZE(__params, __sringp)                                  \
    8.56 +    __SRING_SIZE(__params, sizeof (__sringp)->ring[0])
    8.57 +
    8.58 +/*
    8.59 + *  Macros to make the correct C datatypes for a new kind of ring.
    8.60 + * 
     8.61 + *  To make a new ring datatype, you need to have two message structures,
     8.62 + *  let's say request_t and response_t, already defined.  You also need to
     8.63 + *  know how big the shared memory region you want the ring to occupy is
     8.64 + *  (PAGE_SIZE, for instance).
    8.65 + *
    8.66 + *  In a header where you want the ring datatype declared, you then do:
    8.67 + *
    8.68 + *   #define MY_RING RING_PARAMS(request_t, response_t, PAGE_SIZE)
    8.69 + *   DEFINE_RING_TYPES(mytag, MY_RING);
    8.70 + *
    8.71 + *  These expand out to give you a set of types, as you can see below.
    8.72 + *  The most important of these are:
    8.73 + *  
    8.74 + *     mytag_sring_t      - The shared ring.
    8.75 + *     mytag_front_ring_t - The 'front' half of the ring.
    8.76 + *     mytag_back_ring_t  - The 'back' half of the ring.
    8.77 + *
    8.78 + *  Use the RING_PARAMS define (MY_RING above) as a first parameter on all
    8.79 + *  the ring functions.  To initialize a ring in your code, on the front 
    8.80 + *  half, you do a:
    8.81 + *
    8.82 + *      mytag_front_ring_t front_ring;
    8.83 + *
     8.84 + *      SHARED_RING_INIT(MY_RING, (mytag_sring_t *)shared_page);
     8.85 + *      FRONT_RING_INIT(MY_RING, &front_ring, (mytag_sring_t *)shared_page);
    8.86 + *
    8.87 + *  Initializing the back follows similarly...
    8.88 + */
    8.89 +         
    8.90 +/*  NB: RING SIZING. (a note to ease future debugging...)
    8.91 + *
    8.92 + *  Passing size information into the ring macros is made difficult by 
    8.93 + *  the lack of a reasonable constant declaration in C.  To get around this,
    8.94 + *  the RING_PARAMS define places the requested size of the ring as the 
    8.95 + *  static size of the 'size' array in the anonymous RING_PARAMS struct.
    8.96 + *  While this struct is never actually instantiated, __SRING_SIZE is 
    8.97 + *  able to use sizeof() to get at the constant size.
    8.98 + */
    8.99 +
   8.100 +#define RING_PARAMS(__req_t, __rsp_t, __size)                           \
   8.101 +((struct {                                                              \
   8.102 +    char size[__size];                                                  \
   8.103 +    __req_t req;                                                        \
   8.104 +    __rsp_t rsp;                                                        \
   8.105 +                                                                        \
   8.106 +} *) 0)
   8.107 +
   8.108 +
   8.109 +#define DEFINE_RING_TYPES(__name, __params)                             \
   8.110 +                                                                        \
   8.111 +/* Shared ring entry */                                                 \
   8.112 +union __name##_sring_entry {                                            \
   8.113 +    typeof ((__params)->req) req;                                       \
   8.114 +    typeof ((__params)->rsp) rsp;                                       \
   8.115 +} PACKED;                                                               \
   8.116 +                                                                        \
   8.117 +/* Shared ring page */                                                  \
   8.118 +struct __name##_sring {                                                 \
   8.119 +    RING_IDX req_prod;                                                  \
   8.120 +    RING_IDX rsp_prod;                                                  \
   8.121 +    union __name##_sring_entry                                          \
   8.122 +        ring[__SRING_SIZE(__params, sizeof (union __name##_sring_entry))];        \
   8.123 +} PACKED;                                                               \
   8.124 +                                                                        \
   8.125 +/* "Front" end's private variables */                                   \
   8.126 +struct __name##_front_ring {                                            \
   8.127 +    RING_IDX req_prod_pvt;                                              \
   8.128 +    RING_IDX rsp_cons;                                                  \
   8.129 +    struct __name##_sring *sring;                                       \
   8.130 +};                                                                      \
   8.131 +                                                                        \
   8.132 +/* "Back" end's private variables */                                    \
   8.133 +struct __name##_back_ring {                                             \
   8.134 +    RING_IDX rsp_prod_pvt;                                              \
   8.135 +    RING_IDX req_cons;                                                  \
   8.136 +    struct __name##_sring *sring;                                       \
   8.137 +};                                                                      \
   8.138 +                                                                        \
   8.139 +/* Syntactic sugar */                                                   \
   8.140 +typedef struct __name##_sring __name##_sring_t;                         \
   8.141 +typedef struct __name##_front_ring __name##_front_ring_t;               \
   8.142 +typedef struct __name##_back_ring __name##_back_ring_t;
   8.143 +
   8.144 +/*
   8.145 + *   Macros for manipulating rings.  
   8.146 + * 
   8.147 + *   FRONT_RING_whatever works on the "front end" of a ring: here 
   8.148 + *   requests are pushed on to the ring and responses taken off it.
   8.149 + * 
   8.150 + *   BACK_RING_whatever works on the "back end" of a ring: here 
   8.151 + *   requests are taken off the ring and responses put on.
   8.152 + * 
   8.153 + *   N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.  
   8.154 + *   This is OK in 1-for-1 request-response situations where the 
   8.155 + *   requestor (front end) never has more than SRING_SIZE()-1
   8.156 + *   outstanding requests.
   8.157 + */
   8.158 +
   8.159 +
   8.160 +/* Initialising empty rings */
   8.161 +#define SHARED_RING_INIT(_p, _s) do {                                   \
   8.162 +    (_s)->req_prod = 0;                                                 \
   8.163 +    (_s)->rsp_prod = 0;                                                 \
   8.164 +} while(0)
   8.165 +
   8.166 +#define FRONT_RING_INIT(_p, _r, _s) do {                                \
   8.167 +    (_r)->req_prod_pvt = 0;                                             \
   8.168 +    (_r)->rsp_cons = 0;                                                 \
   8.169 +    (_r)->sring = (_s);                                                 \
   8.170 +} while (0)
   8.171 +
   8.172 +#define BACK_RING_INIT(_p, _r, _s) do {                                 \
   8.173 +    (_r)->rsp_prod_pvt = 0;                                             \
   8.174 +    (_r)->req_cons = 0;                                                 \
   8.175 +    (_r)->sring = (_s);                                                 \
   8.176 +} while (0)
   8.177 +
   8.178 +/* Initialize to existing shared indexes -- for recovery */
   8.179 +#define FRONT_RING_ATTACH(_p, _r, _s) do {                              \
   8.180 +    (_r)->sring = (_s);                                                 \
   8.181 +    (_r)->req_prod_pvt = (_s)->req_prod;                                \
   8.182 +    (_r)->rsp_cons = (_s)->rsp_prod;                                    \
   8.183 +} while (0)
   8.184 +
   8.185 +#define BACK_RING_ATTACH(_p, _r, _s) do {                               \
   8.186 +    (_r)->sring = (_s);                                                 \
   8.187 +    (_r)->rsp_prod_pvt = (_s)->rsp_prod;                                \
   8.188 +    (_r)->req_cons = (_s)->req_prod;                                    \
   8.189 +} while (0)
   8.190 +
   8.191 +
    8.192 +/* How to mask off a free-running index for use as an offset into a ring.
    8.193 + * N.B. This evaluates its index argument once but the other arguments often. */
   8.194 +#define __SHARED_RING_MASK(_p, _s, _i)                                  \
   8.195 +    ((_i) & (SRING_SIZE((_p), (_s)) - 1))
   8.196 +
   8.197 +/* How big is this ring? */
   8.198 +#define RING_SIZE(_p, _r) SRING_SIZE((_p), (_r)->sring)
   8.199 +
    8.200 +/* How many requests are outstanding on a front ring (queued, not yet answered)? */
   8.201 +#define RING_PENDING_REQUESTS(_p, _r)                                   \
   8.202 +   ( ((_r)->req_prod_pvt - (_r)->rsp_cons) )
   8.203 +   
    8.204 +/* Test if the front ring is full, i.e. there is no slot free for a
    8.205 + * new request. (This is only meaningful from the front.)
    8.206 + */
   8.207 +#define RING_FULL(_p, _r)                                               \
   8.208 +    (((_r)->req_prod_pvt - (_r)->rsp_cons) == SRING_SIZE((_p), (_r)->sring))
   8.209 +
   8.210 +/* Test if there are outstanding messages to be processed on a ring. */
   8.211 +#define RING_HAS_UNCONSUMED_RESPONSES(_p, _r)                           \
   8.212 +   ( (_r)->rsp_cons != (_r)->sring->rsp_prod )
   8.213 +   
   8.214 +#define RING_HAS_UNCONSUMED_REQUESTS(_p, _r)                            \
   8.215 +   ( ((_r)->req_cons != (_r)->sring->req_prod ) &&                      \
   8.216 +     (((_r)->req_cons - (_r)->rsp_prod_pvt) !=                          \
   8.217 +      SRING_SIZE((_p), (_r)->sring)) )
   8.218 +      
   8.219 +/* Test if there are messages waiting to be pushed. */
   8.220 +#define RING_HAS_UNPUSHED_REQUESTS(_p, _r)                              \
   8.221 +   ( (_r)->req_prod_pvt != (_r)->sring->req_prod )
   8.222 +   
   8.223 +#define RING_HAS_UNPUSHED_RESPONSES(_p, _r)                             \
   8.224 +   ( (_r)->rsp_prod_pvt != (_r)->sring->rsp_prod )
   8.225 +   
   8.226 +
    8.227 +/* Copy the private producer index into the shared ring so the other end
   8.228 + * can see the updates we've made. */
   8.229 +#define RING_PUSH_REQUESTS(_p, _r) do {                                 \
   8.230 +    wmb();                                                              \
   8.231 +    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
   8.232 +} while (0)
   8.233 +
   8.234 +#define RING_PUSH_RESPONSES(_p, _r) do {                                \
   8.235 +    wmb();                                                              \
   8.236 +    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
   8.237 +} while (0)
   8.238 +
   8.239 +/* Direct access to individual ring elements, by index.  
   8.240 + */
   8.241 +#define RING_GET_REQUEST(_p, _r, _idx)                                  \
   8.242 + (&((_r)->sring->ring[                                                  \
   8.243 +     __SHARED_RING_MASK((_p), (_r)->sring, (_idx))                      \
   8.244 +     ].req))
   8.245 +
   8.246 +#define RING_GET_RESPONSE(_p, _r, _idx)                                 \
   8.247 + (&((_r)->sring->ring[                                                  \
   8.248 +     __SHARED_RING_MASK((_p), (_r)->sring, (_idx))                      \
   8.249 +     ].rsp))   
   8.250 +    
   8.251 +/* Loop termination condition: Would the specified index overflow the 
   8.252 + * ring? 
   8.253 + */
   8.254 +#define RING_REQUEST_CONS_OVERFLOW(_p, _r, _cons)                      \
   8.255 +    (((_cons) - (_r)->rsp_prod_pvt) >= SRING_SIZE((_p), (_r)->sring))
   8.256 +
   8.257 +#endif /* __XEN_PUBLIC_IO_RING_H__ */
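
For reference, the matching back-end consumption pattern, modelled on
do_block_io_op() and make_response() above (names outside the ring macros,
such as MY_RING and the message types, are illustrative):

    mytag_back_ring_t back;   /* BACK_RING_INIT'ed against the same sring */
    RING_IDX i, rp;

    rp = back.sring->req_prod;
    rmb(); /* Ensure we see queued requests up to 'rp'. */

    for ( i = back.req_cons;
          (i != rp) && !RING_REQUEST_CONS_OVERFLOW(MY_RING, &back, i);
          i++ )
    {
        request_t  *req = RING_GET_REQUEST(MY_RING, &back, i);
        response_t *rsp = RING_GET_RESPONSE(MY_RING, &back, back.rsp_prod_pvt);

        /* ... service *req first, then fill in *rsp ... */
        rsp->id = req->id;
        back.rsp_prod_pvt++;
    }
    back.req_cons = i;
    RING_PUSH_RESPONSES(MY_RING, &back); /* wmb(); publish rsp_prod */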