ia64/xen-unstable

changeset 4235:5c446a448cfb

bitkeeper revision 1.1236.1.99 (423e8d21lC6p0xGxw7U1ExVLXpZQag)

Upgrade FreeBSD sparse tree from testing.bk to unstable.bk
Signed-off-by: ian.pratt@cl.cam.ac.uk
author iap10@tetris.cl.cam.ac.uk
date Mon Mar 21 09:00:17 2005 +0000 (2005-03-21)
parents f21c20cb7b5c
children e57dc11820ba
files	freebsd-5.3-xen-sparse/i386-xen/i386-xen/clock.c
	freebsd-5.3-xen-sparse/i386-xen/i386-xen/ctrl_if.c
	freebsd-5.3-xen-sparse/i386-xen/i386-xen/evtchn.c
	freebsd-5.3-xen-sparse/i386-xen/i386-xen/locore.s
	freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c
	freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c
	freebsd-5.3-xen-sparse/i386-xen/i386-xen/vm_machdep.c
	freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c
	freebsd-5.3-xen-sparse/i386-xen/include/evtchn.h
	freebsd-5.3-xen-sparse/i386-xen/include/pmap.h
	freebsd-5.3-xen-sparse/i386-xen/include/vmparam.h
	freebsd-5.3-xen-sparse/i386-xen/include/xen-os.h
	freebsd-5.3-xen-sparse/i386-xen/xen/blkfront/xb_blkfront.c
	freebsd-5.3-xen-sparse/i386-xen/xen/misc/evtchn_dev.c
	freebsd-5.3-xen-sparse/i386-xen/xen/netfront/xn_netfront.c
line diff
     1.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/clock.c	Mon Mar 21 08:22:32 2005 +0000
     1.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/clock.c	Mon Mar 21 09:00:17 2005 +0000
     1.3 @@ -105,6 +105,8 @@ int	statclock_disable;
     1.4  #define TIMER_FREQ   1193182
     1.5  #endif
     1.6  u_int	timer_freq = TIMER_FREQ;
     1.7 +struct mtx clock_lock;
     1.8 +
     1.9  
    1.10  static	const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31};
    1.11  
     2.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/ctrl_if.c	Mon Mar 21 08:22:32 2005 +0000
     2.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/ctrl_if.c	Mon Mar 21 09:00:17 2005 +0000
     2.3 @@ -39,6 +39,18 @@
     2.4  #include <machine/evtchn.h>
     2.5  
     2.6  /*
     2.7 + * Extra ring macros to sync a consumer index up to the public producer index. 
     2.8 + * Generally UNSAFE, but we use it for recovery and shutdown in some cases.
     2.9 + */
    2.10 +#define RING_DROP_PENDING_REQUESTS(_r)                                  \
    2.11 +    do {                                                                \
    2.12 +        (_r)->req_cons = (_r)->sring->req_prod;                         \
    2.13 +    } while (0)
    2.14 +#define RING_DROP_PENDING_RESPONSES(_r)                                 \
    2.15 +    do {                                                                \
    2.16 +        (_r)->rsp_cons = (_r)->sring->rsp_prod;                         \
    2.17 +    } while (0)
    2.18 +/*
    2.19   * Only used by initial domain which must create its own control-interface
    2.20   * event channel. This value is picked up by the user-space domain controller
    2.21   * via an ioctl.
    2.22 @@ -51,8 +63,8 @@ static struct mtx ctrl_if_lock;
    2.23  static int *      ctrl_if_wchan = &ctrl_if_evtchn;
    2.24  
    2.25  
    2.26 -static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
    2.27 -static CONTROL_RING_IDX ctrl_if_rx_req_cons;
    2.28 +static ctrl_front_ring_t ctrl_if_tx_ring;
    2.29 +static ctrl_back_ring_t  ctrl_if_rx_ring;
    2.30  
    2.31  /* Incoming message requests. */
    2.32      /* Primary message type -> message handler. */
    2.33 @@ -85,7 +97,7 @@ TASKQUEUE_DECLARE(ctrl_if_txB);
    2.34  TASKQUEUE_DEFINE(ctrl_if_txB, NULL, NULL, {});
    2.35  struct taskqueue **taskqueue_ctrl_if_tx[2] = { &taskqueue_ctrl_if_txA,
    2.36      				               &taskqueue_ctrl_if_txB };
    2.37 -int ctrl_if_idx;
    2.38 +static int ctrl_if_idx = 0;
    2.39  
    2.40  static struct task ctrl_if_rx_tasklet;
    2.41  static struct task ctrl_if_tx_tasklet;
    2.42 @@ -95,8 +107,6 @@ static struct task ctrl_if_rxmsg_deferre
    2.43  
    2.44  
    2.45  #define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
    2.46 -#define TX_FULL(_c)   \
    2.47 -    (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
    2.48  
    2.49  static void 
    2.50  ctrl_if_notify_controller(void)
    2.51 @@ -114,13 +124,17 @@ ctrl_if_rxmsg_default_handler(ctrl_msg_t
    2.52  static void 
    2.53  __ctrl_if_tx_tasklet(void *context __unused, int pending __unused)
    2.54  {
    2.55 -    control_if_t *ctrl_if = get_ctrl_if();
    2.56      ctrl_msg_t   *msg;
    2.57 -    int           was_full = TX_FULL(ctrl_if);
    2.58 +    int           was_full = RING_FULL(&ctrl_if_tx_ring);
    2.59 +    RING_IDX      i, rp;
    2.60  
    2.61 -    while ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
    2.62 +    i  = ctrl_if_tx_ring.rsp_cons;
    2.63 +    rp = ctrl_if_tx_ring.sring->rsp_prod;
    2.64 +    rmb(); /* Ensure we see all requests up to 'rp'. */
    2.65 +
    2.66 +    for ( ; i != rp; i++ )
    2.67      {
    2.68 -        msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
    2.69 +        msg = RING_GET_RESPONSE(&ctrl_if_tx_ring, i);
    2.70  
    2.71          /* Execute the callback handler, if one was specified. */
    2.72          if ( msg->id != 0xFF )
    2.73 @@ -131,77 +145,102 @@ static void
    2.74              ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
    2.75          }
    2.76  
    2.77 -        /*
    2.78 -         * Step over the message in the ring /after/ finishing reading it. As 
    2.79 -         * soon as the index is updated then the message may get blown away.
    2.80 -         */
    2.81 -        smp_mb();
    2.82 -        ctrl_if_tx_resp_cons++;
    2.83      }
    2.84  
    2.85 -    if ( was_full && !TX_FULL(ctrl_if) )
    2.86 +    /*
    2.87 +     * Step over the message in the ring /after/ finishing reading it. As 
    2.88 +     * soon as the index is updated then the message may get blown away.
    2.89 +     */
    2.90 +    smp_mb();
    2.91 +    ctrl_if_tx_ring.rsp_cons = i;
    2.92 +
    2.93 +    if ( was_full && !RING_FULL(&ctrl_if_tx_ring) )
    2.94      {
    2.95          wakeup(ctrl_if_wchan);
    2.96  
    2.97  	/* bump idx so future enqueues will occur on the next taskq
    2.98  	 * process any currently pending tasks
    2.99  	 */
   2.100 -	ctrl_if_idx++;		
   2.101 +	ctrl_if_idx++;
   2.102          taskqueue_run(*taskqueue_ctrl_if_tx[(ctrl_if_idx-1) & 1]);
   2.103      }
   2.104 +
   2.105  }
   2.106  
   2.107  static void 
   2.108  __ctrl_if_rxmsg_deferred_task(void *context __unused, int pending __unused)
   2.109  {
   2.110      ctrl_msg_t *msg;
   2.111 +    CONTROL_RING_IDX dp;
   2.112  
   2.113 -    while ( ctrl_if_rxmsg_deferred_cons != ctrl_if_rxmsg_deferred_prod )
   2.114 +    dp = ctrl_if_rxmsg_deferred_prod;
   2.115 +    rmb(); /* Ensure we see all deferred requests up to 'dp'. */
   2.116 +    
   2.117 +    while ( ctrl_if_rxmsg_deferred_cons != dp )
   2.118      {
   2.119          msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
   2.120              ctrl_if_rxmsg_deferred_cons++)];
   2.121          (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
   2.122      }
   2.123 +    
   2.124  }
   2.125  
   2.126  static void 
   2.127  __ctrl_if_rx_tasklet(void *context __unused, int pending __unused)
   2.128  {
   2.129 -    control_if_t *ctrl_if = get_ctrl_if();
   2.130      ctrl_msg_t    msg, *pmsg;
   2.131 +    CONTROL_RING_IDX dp;
   2.132 +    RING_IDX rp, i;
   2.133 +
   2.134 +    i  = ctrl_if_rx_ring.req_cons;
   2.135 +    rp = ctrl_if_rx_ring.sring->req_prod;
   2.136 +    dp = ctrl_if_rxmsg_deferred_prod;
   2.137  
   2.138 -    while ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
   2.139 +    rmb(); /* Ensure we see all requests up to 'rp'. */
   2.140 +    
   2.141 +    for ( ; i != rp; i++) 
   2.142      {
   2.143 -        pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
   2.144 +        pmsg = RING_GET_REQUEST(&ctrl_if_rx_ring, i);
   2.145          memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
   2.146 +	
   2.147 +	if ( msg.length > sizeof(msg.msg))
   2.148 +	    msg.length = sizeof(msg.msg);
   2.149          if ( msg.length != 0 )
   2.150              memcpy(msg.msg, pmsg->msg, msg.length);
   2.151          if ( test_bit(msg.type, &ctrl_if_rxmsg_blocking_context) )
   2.152          {
   2.153 -            pmsg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
   2.154 -                ctrl_if_rxmsg_deferred_prod++)];
   2.155 -            memcpy(pmsg, &msg, offsetof(ctrl_msg_t, msg) + msg.length);
   2.156 -            taskqueue_enqueue(taskqueue_thread, &ctrl_if_rxmsg_deferred_task);
   2.157 +            memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)], 
   2.158 +		    &msg, offsetof(ctrl_msg_t, msg) + msg.length);
   2.159          }
   2.160          else
   2.161          {
   2.162              (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
   2.163          }
   2.164      }
   2.165 +    ctrl_if_rx_ring.req_cons = i;
   2.166 +
   2.167 +    if ( dp != ctrl_if_rxmsg_deferred_prod )
   2.168 +    {
   2.169 +        wmb();
   2.170 +        ctrl_if_rxmsg_deferred_prod = dp;
   2.171 +        taskqueue_enqueue(taskqueue_thread, &ctrl_if_rxmsg_deferred_task);
   2.172 +    }
   2.173 +
   2.174  }
   2.175  
   2.176  static void 
   2.177  ctrl_if_interrupt(void *ctrl_sc)
   2.178  /* (int irq, void *dev_id, struct pt_regs *regs) */
   2.179  {
   2.180 -    control_if_t *ctrl_if = get_ctrl_if();
   2.181  
   2.182 -    if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
   2.183 +    
   2.184 +    if ( RING_HAS_UNCONSUMED_RESPONSES(&ctrl_if_tx_ring) )
   2.185  	taskqueue_enqueue(taskqueue_swi, &ctrl_if_tx_tasklet);
   2.186      
   2.187  
   2.188 -    if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
   2.189 +    if ( RING_HAS_UNCONSUMED_REQUESTS(&ctrl_if_rx_ring) )
   2.190   	taskqueue_enqueue(taskqueue_swi, &ctrl_if_rx_tasklet);
   2.191 +    
   2.192  }
   2.193  
   2.194  int 
   2.195 @@ -210,13 +249,13 @@ ctrl_if_send_message_noblock(
   2.196      ctrl_msg_handler_t hnd,
   2.197      unsigned long id)
   2.198  {
   2.199 -    control_if_t *ctrl_if = get_ctrl_if();
   2.200      unsigned long flags;
   2.201 +    ctrl_msg_t   *dmsg;
   2.202      int           i;
   2.203  
   2.204      mtx_lock_irqsave(&ctrl_if_lock, flags);
   2.205  
   2.206 -    if ( TX_FULL(ctrl_if) )
   2.207 +    if ( RING_FULL(&ctrl_if_tx_ring) )
   2.208      {
   2.209          mtx_unlock_irqrestore(&ctrl_if_lock, flags);
   2.210          return EAGAIN;
   2.211 @@ -232,10 +271,11 @@ ctrl_if_send_message_noblock(
   2.212          msg->id = i;
   2.213      }
   2.214  
   2.215 -    memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)], 
   2.216 -           msg, sizeof(*msg));
   2.217 -    wmb(); /* Write the message before letting the controller peek at it. */
   2.218 -    ctrl_if->tx_req_prod++;
   2.219 +    dmsg = RING_GET_REQUEST(&ctrl_if_tx_ring, 
   2.220 +            ctrl_if_tx_ring.req_prod_pvt);
   2.221 +    memcpy(dmsg, msg, sizeof(*msg));
   2.222 +    ctrl_if_tx_ring.req_prod_pvt++;
   2.223 +    RING_PUSH_REQUESTS(&ctrl_if_tx_ring);
   2.224  
   2.225      mtx_unlock_irqrestore(&ctrl_if_lock, flags);
   2.226  
   2.227 @@ -252,34 +292,35 @@ ctrl_if_send_message_block(
   2.228      long wait_state)
   2.229  {
   2.230      int rc, sst = 0;
   2.231 -
   2.232 +    
   2.233      /* Fast path. */
   2.234 -    if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != EAGAIN )
   2.235 -        return rc;
   2.236 -
   2.237 -
   2.238 +    if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != EAGAIN ) 
   2.239 +        goto done;
   2.240 +    
   2.241      for ( ; ; )
   2.242      {
   2.243  
   2.244          if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != EAGAIN )
   2.245              break;
   2.246  
   2.247 -        if ( sst != 0) 
   2.248 -	    return EINTR;
   2.249 +        if ( sst != 0) {
   2.250 +	    rc = EINTR;
   2.251 +	    goto done;
   2.252 +	}
   2.253  
   2.254          sst = tsleep(ctrl_if_wchan, PWAIT|PCATCH, "ctlrwt", 10);
   2.255      }
   2.256 -
   2.257 + done:
   2.258 +    
   2.259      return rc;
   2.260  }
   2.261  
   2.262  int 
   2.263  ctrl_if_enqueue_space_callback(struct task *task)
   2.264  {
   2.265 -    control_if_t *ctrl_if = get_ctrl_if();
   2.266  
   2.267      /* Fast path. */
   2.268 -    if ( !TX_FULL(ctrl_if) )
   2.269 +    if ( !RING_FULL(&ctrl_if_tx_ring) )
   2.270          return 0;
   2.271  
   2.272      (void)taskqueue_enqueue(*taskqueue_ctrl_if_tx[(ctrl_if_idx & 1)], task);
   2.273 @@ -290,13 +331,12 @@ ctrl_if_enqueue_space_callback(struct ta
   2.274       * certainly return 'not full'.
   2.275       */
   2.276      smp_mb();
   2.277 -    return TX_FULL(ctrl_if);
   2.278 +    return RING_FULL(&ctrl_if_tx_ring);
   2.279  }
   2.280  
   2.281  void 
   2.282  ctrl_if_send_response(ctrl_msg_t *msg)
   2.283  {
   2.284 -    control_if_t *ctrl_if = get_ctrl_if();
   2.285      unsigned long flags;
   2.286      ctrl_msg_t   *dmsg;
   2.287  
   2.288 @@ -305,11 +345,14 @@ ctrl_if_send_response(ctrl_msg_t *msg)
   2.289       * In this situation we may have src==dst, so no copying is required.
   2.290       */
   2.291      mtx_lock_irqsave(&ctrl_if_lock, flags);
   2.292 -    dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
   2.293 +    dmsg =  RING_GET_RESPONSE(&ctrl_if_rx_ring, 
   2.294 +			      ctrl_if_rx_ring.rsp_prod_pvt);
   2.295      if ( dmsg != msg )
   2.296          memcpy(dmsg, msg, sizeof(*msg));
   2.297 -    wmb(); /* Write the message before letting the controller peek at it. */
   2.298 -    ctrl_if->rx_resp_prod++;
   2.299 + 
   2.300 +    ctrl_if_rx_ring.rsp_prod_pvt++;
   2.301 +    RING_PUSH_RESPONSES(&ctrl_if_rx_ring);
   2.302 +
   2.303      mtx_unlock_irqrestore(&ctrl_if_lock, flags);
   2.304  
   2.305      ctrl_if_notify_controller();
   2.306 @@ -323,7 +366,7 @@ ctrl_if_register_receiver(
   2.307  {
   2.308      unsigned long _flags;
   2.309      int inuse;
   2.310 -
   2.311 +    
   2.312      mtx_lock_irqsave(&ctrl_if_lock, _flags);
   2.313  
   2.314      inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
   2.315 @@ -344,7 +387,7 @@ ctrl_if_register_receiver(
   2.316      }
   2.317  
   2.318      mtx_unlock_irqrestore(&ctrl_if_lock, _flags);
   2.319 -
   2.320 +    
   2.321      return !inuse;
   2.322  }
   2.323  
   2.324 @@ -382,6 +425,7 @@ ctrl_if_suspend(void)
   2.325      unbind_evtchn_from_irq(ctrl_if_evtchn);
   2.326  }
   2.327   
   2.328 +#if 0
   2.329  /** Reset the control interface progress pointers.
   2.330   * Marks the queues empty if 'clear' non-zero.
   2.331   */
   2.332 @@ -398,10 +442,13 @@ ctrl_if_reset(int clear)
   2.333      ctrl_if_rx_req_cons  = ctrl_if->rx_resp_prod;
   2.334  }
   2.335  
   2.336 -
   2.337 +#endif
   2.338  void 
   2.339  ctrl_if_resume(void)
   2.340  {
   2.341 +    control_if_t *ctrl_if = get_ctrl_if();
   2.342 +
   2.343 +    TRACE_ENTER;
   2.344      if ( xen_start_info->flags & SIF_INITDOMAIN )
   2.345      {
   2.346          /*
   2.347 @@ -421,7 +468,10 @@ ctrl_if_resume(void)
   2.348          initdom_ctrlif_domcontroller_port   = op.u.bind_interdomain.port2;
   2.349      }
   2.350      
   2.351 -    ctrl_if_reset(0);
   2.352 +
   2.353 +    /* Sync up with shared indexes. */
   2.354 +    FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring);
   2.355 +    BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring);
   2.356  
   2.357      ctrl_if_evtchn = xen_start_info->domain_controller_evtchn;
   2.358      ctrl_if_irq    = bind_evtchn_to_irq(ctrl_if_evtchn);
   2.359 @@ -433,17 +483,24 @@ ctrl_if_resume(void)
   2.360       */
   2.361  
   2.362      intr_add_handler("ctrl-if", ctrl_if_irq, (driver_intr_t*)ctrl_if_interrupt,
   2.363 -		     NULL, INTR_TYPE_NET | INTR_MPSAFE, NULL);
   2.364 +		     NULL, INTR_TYPE_NET, NULL);
   2.365 +    TRACE_EXIT;
   2.366 +    /* XXX currently assuming not MPSAFE */ 
   2.367  }
   2.368  
   2.369  static void 
   2.370  ctrl_if_init(void *dummy __unused)
   2.371  {
   2.372 +    control_if_t *ctrl_if = get_ctrl_if();
   2.373 +
   2.374      int i;
   2.375  
   2.376      for ( i = 0; i < 256; i++ )
   2.377          ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
   2.378      
   2.379 +    FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring);
   2.380 +    BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring);
   2.381 +
   2.382      mtx_init(&ctrl_if_lock, "ctrlif", NULL, MTX_SPIN | MTX_NOWITNESS);
   2.383      
   2.384      TASK_INIT(&ctrl_if_tx_tasklet, 0, __ctrl_if_tx_tasklet, NULL);
   2.385 @@ -452,7 +509,7 @@ ctrl_if_init(void *dummy __unused)
   2.386  
   2.387      TASK_INIT(&ctrl_if_rxmsg_deferred_task, 0, __ctrl_if_rxmsg_deferred_task, NULL);
   2.388  
   2.389 -    ctrl_if_reset(1);
   2.390 +
   2.391      ctrl_if_resume();
   2.392  }
   2.393  
   2.394 @@ -464,13 +521,13 @@ ctrl_if_init(void *dummy __unused)
   2.395  int 
   2.396  ctrl_if_transmitter_empty(void)
   2.397  {
   2.398 -    return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
   2.399 +    return (ctrl_if_tx_ring.sring->req_prod == ctrl_if_tx_ring.rsp_cons);
   2.400  }
   2.401  
   2.402  void 
   2.403  ctrl_if_discard_responses(void)
   2.404  {
   2.405 -    ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
   2.406 +    RING_DROP_PENDING_RESPONSES(&ctrl_if_tx_ring);
   2.407  }
   2.408  
   2.409  SYSINIT(ctrl_if_init, SI_SUB_DRIVERS, SI_ORDER_FIRST, ctrl_if_init, NULL);
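
The ctrl_if.c changes above move the control interface from hand-maintained CONTROL_RING_IDX counters to the generic front/back ring macros (FRONT_RING_ATTACH, RING_GET_REQUEST, RING_PUSH_REQUESTS and friends). As a minimal illustrative sketch, not part of the patch, the producer side of that pattern looks roughly like this for a front ring that has already been attached ('ring', 'dst' and 'req' are illustrative names):

    /* Sketch only: enqueue one request on a generic front ring. */
    if (RING_FULL(&ring))
        return EAGAIN;                            /* no slot free; caller retries   */
    dst = RING_GET_REQUEST(&ring, ring.req_prod_pvt);
    memcpy(dst, &req, sizeof(*dst));              /* fill the private slot          */
    ring.req_prod_pvt++;                          /* advance the private producer   */
    RING_PUSH_REQUESTS(&ring);                    /* barrier, then publish req_prod */
    /* finally kick the peer, e.g. via ctrl_if_notify_controller() above */

ctrl_if_send_message_noblock() follows this shape, and ctrl_if_send_response() mirrors it on the response side of the back ring.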
     3.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/evtchn.c	Mon Mar 21 08:22:32 2005 +0000
     3.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/evtchn.c	Mon Mar 21 09:00:17 2005 +0000
     3.3 @@ -54,9 +54,10 @@ evtchn_do_upcall(struct intrframe *frame
     3.4  {
     3.5      unsigned long  l1, l2;
     3.6      unsigned int   l1i, l2i, port;
     3.7 -    int            irq, owned;
     3.8 +    int            irq;
     3.9      unsigned long  flags;
    3.10      shared_info_t *s = HYPERVISOR_shared_info;
    3.11 +    vcpu_info_t   *vcpu_info = &s->vcpu_data[smp_processor_id()];
    3.12  
    3.13      local_irq_save(flags);
    3.14  
    3.15 @@ -64,7 +65,7 @@ evtchn_do_upcall(struct intrframe *frame
    3.16      {
    3.17          s->vcpu_data[0].evtchn_upcall_pending = 0;
    3.18          /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
    3.19 -        l1 = xen_xchg(&s->evtchn_pending_sel, 0);
    3.20 +        l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);
    3.21          while ( (l1i = ffs(l1)) != 0 )
    3.22          {
    3.23              l1i--;
    3.24 @@ -77,17 +78,12 @@ evtchn_do_upcall(struct intrframe *frame
    3.25                  l2 &= ~(1 << l2i);
    3.26              
    3.27                  port = (l1i << 5) + l2i;
    3.28 -		if ((owned = mtx_owned(&sched_lock)) != 0)
    3.29 -		    mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
    3.30                  if ( (irq = evtchn_to_irq[port]) != -1 ) {
    3.31  		    struct intsrc *isrc = intr_lookup_source(irq);
    3.32  		    intr_execute_handlers(isrc, frame);
    3.33 -
    3.34  		} else {
    3.35                      evtchn_device_upcall(port);
    3.36  		}
    3.37 -		if (owned)
    3.38 -		    mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
    3.39              }
    3.40          }
    3.41      }
    3.42 @@ -451,12 +447,12 @@ static struct hw_interrupt_type pirq_typ
    3.43  };
    3.44  #endif
    3.45  
    3.46 -
    3.47 +#if 0
    3.48  static void 
    3.49  misdirect_interrupt(void *sc)
    3.50  {
    3.51  }
    3.52 -
    3.53 +#endif
    3.54  void irq_suspend(void)
    3.55  {
    3.56      int virq, irq, evtchn;
    3.57 @@ -572,9 +568,12 @@ evtchn_init(void *dummy __unused)
    3.58      }
    3.59  
    3.60  #endif
    3.61 +#if 0
    3.62      (void) intr_add_handler("xb_mis", bind_virq_to_irq(VIRQ_MISDIRECT),
    3.63  	    	            (driver_intr_t *)misdirect_interrupt, 
    3.64  			    NULL, INTR_TYPE_MISC, NULL);
    3.65 +
    3.66 +#endif
    3.67  }
    3.68  
    3.69  SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_ANY, evtchn_init, NULL);
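
The evtchn.c changes read the pending-event selector from the per-vcpu area rather than the single shared field, and drop the sched_lock juggling around handler dispatch. For context, the registration side that feeds evtchn_do_upcall() appears elsewhere in this changeset (ctrl_if_resume() and the now-disabled VIRQ_MISDIRECT hook); a hedged sketch for a hypothetical driver would be:

    /* Sketch only: bind an event channel to an IRQ and attach a handler.
     * The "example" names are illustrative, not from this tree. */
    irq = bind_evtchn_to_irq(evtchn);             /* evtchn supplied by the tools  */
    intr_add_handler("xn_example", irq,
                     (driver_intr_t *)example_intr, sc,
                     INTR_TYPE_NET, NULL);        /* handlers run non-MPSAFE here  */

When the channel fires, evtchn_do_upcall() computes port = (l1i << 5) + l2i from the two-level pending bitmap and routes the port either to the bound IRQ handler or to evtchn_device_upcall().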
     4.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/locore.s	Mon Mar 21 08:22:32 2005 +0000
     4.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/locore.s	Mon Mar 21 09:00:17 2005 +0000
     4.3 @@ -56,7 +56,7 @@
     4.4  #include "assym.s"
     4.5  
     4.6  .section __xen_guest
     4.7 -	    .asciz "LOADER=generic,GUEST_VER=5.2.1,XEN_VER=2.0,BSD_SYMTAB"
     4.8 +	    .asciz "LOADER=generic,GUEST_VER=5.3,XEN_VER=3.0,BSD_SYMTAB"
     4.9  	
    4.10  	
    4.11  /*
     5.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c	Mon Mar 21 08:22:32 2005 +0000
     5.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c	Mon Mar 21 09:00:17 2005 +0000
     5.3 @@ -214,19 +214,7 @@ static struct trapframe proc0_tf;
     5.4  #ifndef SMP
     5.5  static struct pcpu __pcpu;
     5.6  #endif
     5.7 -
     5.8 -static void 
     5.9 -map_range(void *physptr, unsigned long physptrindex, 
    5.10 -	  unsigned long physindex, int count, unsigned int flags) {
    5.11 -    int i;
    5.12 -    unsigned long pte, ppa;
    5.13 -    for (i = 0; i < count; i++) {
    5.14 -	pte = ((unsigned long)physptr) + (physptrindex << 2) + (i << 2); 
    5.15 -	ppa = (PTOM(physindex + i) << PAGE_SHIFT) | flags | PG_V | PG_A;
    5.16 -	xpq_queue_pt_update((pt_entry_t *)pte, ppa); 
    5.17 -    }
    5.18 -    mcl_flush_queue();
    5.19 -}
    5.20 +struct mtx icu_lock;
    5.21  
    5.22  struct mem_range_softc mem_range_softc;
    5.23  
    5.24 @@ -1377,20 +1365,18 @@ getmemsize(void)
    5.25      pmap_bootstrap((init_first)<< PAGE_SHIFT, 0);
    5.26      for (i = 0; i < 10; i++)
    5.27  	phys_avail[i] = 0;
    5.28 -#ifdef MAXMEM
    5.29 -    if (MAXMEM/4 < Maxmem)
    5.30 -	Maxmem = MAXMEM/4;
    5.31 -#endif
    5.32      physmem = Maxmem;
    5.33      avail_end = ptoa(Maxmem) - round_page(MSGBUF_SIZE);
    5.34      phys_avail[0] = init_first << PAGE_SHIFT;
    5.35      phys_avail[1] = avail_end;
    5.36  }
    5.37  
    5.38 -extern pt_entry_t *KPTphys;
    5.39 -extern int kernbase;
    5.40 +extern unsigned long cpu0prvpage;
    5.41 +extern unsigned long *SMPpt;
    5.42  pteinfo_t *pteinfo_list;
    5.43  unsigned long *xen_machine_phys = ((unsigned long *)VADDR(1008, 0));
    5.44 +int preemptable;
    5.45 +int gdt_set;
    5.46  
    5.47  /* Linux infection */
    5.48  #define PAGE_OFFSET  KERNBASE
    5.49 @@ -1406,8 +1392,6 @@ initvalues(start_info_t *startinfo)
    5.50      xendebug_flags = 0xffffffff;
    5.51      /* pre-zero unused mapped pages */
    5.52      bzero((char *)(KERNBASE + (tmpindex << PAGE_SHIFT)), (1024 - tmpindex)*PAGE_SIZE); 
    5.53 -    
    5.54 -    KPTphys = (pt_entry_t *)xpmap_ptom(__pa(startinfo->pt_base + PAGE_SIZE));
    5.55      IdlePTD = (pd_entry_t *)xpmap_ptom(__pa(startinfo->pt_base));
    5.56      XENPRINTF("IdlePTD %p\n", IdlePTD);
    5.57      XENPRINTF("nr_pages: %ld shared_info: 0x%lx flags: 0x%lx pt_base: 0x%lx "
    5.58 @@ -1416,6 +1400,10 @@ initvalues(start_info_t *startinfo)
    5.59  	      xen_start_info->flags, xen_start_info->pt_base, 
    5.60  	      xen_start_info->mod_start, xen_start_info->mod_len);
    5.61      
    5.62 +    /* setup self-referential mapping first so vtomach will work */
    5.63 +    xpq_queue_pt_update(IdlePTD + PTDPTDI , (unsigned long)IdlePTD | 
    5.64 +			PG_V | PG_A);
    5.65 +    mcl_flush_queue();
    5.66      /* Map proc0's UPAGES */
    5.67      proc0uarea = (struct user *)(KERNBASE + (tmpindex << PAGE_SHIFT));
    5.68      tmpindex += UAREA_PAGES;
    5.69 @@ -1431,6 +1419,25 @@ initvalues(start_info_t *startinfo)
    5.70      /* allocate page for ldt */
    5.71      ldt = (union descriptor *)(KERNBASE + (tmpindex << PAGE_SHIFT));
    5.72      tmpindex++; 
    5.73 +#ifdef SMP
    5.74 +    /* allocate cpu0 private page */
    5.75 +    cpu0prvpage = (KERNBASE + (tmpindex << PAGE_SHIFT));
    5.76 +    tmpindex++; 
    5.77 +
    5.78 +    /* allocate SMP page table */
    5.79 +    SMPpt = (unsigned long *)(KERNBASE + (tmpindex << PAGE_SHIFT));
    5.80 +
    5.81 +    /* Map the private page into the SMP page table */
    5.82 +    SMPpt[0] = vtomach(cpu0prvpage) | PG_RW | PG_M | PG_V | PG_A;
    5.83 +
    5.84 +    /* map SMP page table RO */
    5.85 +    PT_SET_MA(SMPpt, vtomach(SMPpt) & ~PG_RW, TRUE);
    5.86 +
    5.87 +    /* put the page table into the pde */
    5.88 +    xpq_queue_pt_update(IdlePTD + MPPTDI, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
    5.89 +
    5.90 +    tmpindex++;
    5.91 +#endif
    5.92  
    5.93  #ifdef PMAP_DEBUG    
    5.94      pteinfo_list = (pteinfo_t *)(KERNBASE + (tmpindex << PAGE_SHIFT));
    5.95 @@ -1444,17 +1451,20 @@ initvalues(start_info_t *startinfo)
    5.96  	PT_CLEAR(KERNBASE + (i << PAGE_SHIFT), TRUE);
    5.97  
    5.98      /* allocate remainder of NKPT pages */
    5.99 -    map_range(IdlePTD, KPTDI + 1, tmpindex, NKPT-1, PG_U | PG_M | PG_RW);
   5.100 +    for (i = 0; i < NKPT-1; i++, tmpindex++)
   5.101 +	xpq_queue_pt_update(IdlePTD + KPTDI + i + 1, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
   5.102      tmpindex += NKPT-1;
   5.103 -    map_range(IdlePTD, PTDPTDI, __pa(xen_start_info->pt_base) >> PAGE_SHIFT, 1, 0);
   5.104 +
   5.105 +
   5.106  
   5.107 -    xpq_queue_pt_update(KPTphys + tmpindex, xen_start_info->shared_info | PG_A | PG_V | PG_RW);
   5.108 +    tmpindex += NKPT-1;
   5.109 +    PT_UPDATES_FLUSH();
   5.110 +
   5.111      HYPERVISOR_shared_info = (shared_info_t *)(KERNBASE + (tmpindex << PAGE_SHIFT));
   5.112 +    PT_SET_MA(HYPERVISOR_shared_info, xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M, TRUE);
   5.113      tmpindex++;
   5.114  
   5.115 -    mcl_flush_queue();
   5.116      HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list = (unsigned long)xen_phys_machine;
   5.117 -    HYPERVISOR_shared_info->arch.mfn_to_pfn_start = (unsigned long)xen_machine_phys;
   5.118      
   5.119      init_first = tmpindex;
   5.120      
   5.121 @@ -1465,6 +1475,7 @@ init386(void)
   5.122  {
   5.123  	int gsel_tss, metadata_missing, off, x, error;
   5.124  	struct pcpu *pc;
   5.125 +	unsigned long gdtmachpfn;
   5.126  	trap_info_t trap_table[] = {
   5.127  	    { 0,   0, GSEL(GCODE_SEL, SEL_KPL), (unsigned long) &IDTVEC(div)},
   5.128  	    { 1,   0, GSEL(GCODE_SEL, SEL_KPL), (unsigned long) &IDTVEC(dbg)},
   5.129 @@ -1541,6 +1552,9 @@ init386(void)
   5.130  	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - ((1 << 26) - (1 << 22) + (1 << 16))); 
   5.131  #endif
   5.132  #ifdef SMP
   5.133 +	/* this correspond to the cpu private page as mapped into the SMP page 
   5.134 +	 * table in initvalues
   5.135 +	 */
   5.136  	pc = &SMP_prvspace[0].pcpu;
   5.137  	gdt_segs[GPRIV_SEL].ssd_limit =
   5.138  		atop(sizeof(struct privatespace) - 1);
   5.139 @@ -1553,17 +1567,15 @@ init386(void)
   5.140  	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
   5.141  	for (x = 0; x < NGDT; x++)
   5.142  	    ssdtosd(&gdt_segs[x], &gdt[x].sd);
   5.143 -	/* re-map GDT read-only */
   5.144 -	{
   5.145 -	    unsigned long gdtindex = (((unsigned long)gdt - KERNBASE) >> PAGE_SHIFT);
   5.146 -	    unsigned long gdtphys = PTOM(gdtindex);
   5.147 -	    map_range(KPTphys, gdtindex, gdtindex, 1, 0);
   5.148 -	    mcl_flush_queue();
   5.149 -	    if (HYPERVISOR_set_gdt(&gdtphys, LAST_RESERVED_GDT_ENTRY + 1)) {
   5.150 -		panic("set_gdt failed\n");
   5.151 -	    }
   5.152 -	    lgdt_finish();
   5.153 +
   5.154 +	PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW, TRUE); 
   5.155 +	gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
   5.156 +	if (HYPERVISOR_set_gdt(&gdtmachpfn, LAST_RESERVED_GDT_ENTRY + 1)) {
   5.157 +	    XENPRINTF("set_gdt failed\n");
   5.158 +
   5.159  	}
   5.160 +	lgdt_finish();
   5.161 +	gdt_set = 1;
   5.162  
   5.163  	if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
   5.164  		panic("set_trap_table failed - error %d\n", error);
   5.165 @@ -1580,7 +1592,6 @@ init386(void)
   5.166  	PCPU_SET(prvspace, pc);
   5.167  	PCPU_SET(curthread, &thread0);
   5.168  	PCPU_SET(curpcb, thread0.td_pcb);
   5.169 -	PCPU_SET(trap_nesting, 0);
   5.170  	PCPU_SET(pdir, (unsigned long)IdlePTD);
   5.171  	/*
   5.172  	 * Initialize mutexes.
   5.173 @@ -1588,6 +1599,11 @@ init386(void)
   5.174  	 */
   5.175  	mutex_init();
   5.176  
   5.177 +	mtx_init(&clock_lock, "clk", NULL, MTX_SPIN);
   5.178 +	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
   5.179 +
   5.180 +
   5.181 +
   5.182  	/* make ldt memory segments */
   5.183  	/*
   5.184  	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
   5.185 @@ -1600,14 +1616,11 @@ init386(void)
   5.186  	default_proc_ldt.ldt_base = (caddr_t)ldt;
   5.187  	default_proc_ldt.ldt_len = 6;
   5.188  	_default_ldt = (int)&default_proc_ldt;
   5.189 -	PCPU_SET(currentldt, _default_ldt);
   5.190 -	{
   5.191 -	    unsigned long ldtindex = (((unsigned long)ldt - KERNBASE) >> PAGE_SHIFT);
   5.192 -	    map_range(KPTphys, ldtindex, ldtindex, 1, 0);
   5.193 -	    mcl_flush_queue();
   5.194 -	    xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
   5.195 -	}
   5.196 - 
   5.197 +	PCPU_SET(currentldt, _default_ldt)
   5.198 +	PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW, TRUE);
   5.199 +	xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
   5.200 +
   5.201 +
   5.202  	/*
   5.203  	 * Initialize the console before we print anything out.
   5.204  	 */
   5.205 @@ -1638,12 +1651,15 @@ init386(void)
   5.206  	    KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
   5.207  	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
   5.208  	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
   5.209 +#if 0
   5.210  	private_tss = 0;
   5.211  	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
   5.212  	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
   5.213  	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
   5.214 +#endif
   5.215  	HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL), PCPU_GET(common_tss.tss_esp0));
   5.216  
   5.217 +
   5.218  	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
   5.219  	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
   5.220  	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
   5.221 @@ -1667,7 +1683,6 @@ init386(void)
   5.222  	PT_UPDATES_FLUSH();
   5.223  
   5.224  	/* safe to enable xen page queue locking */
   5.225 -    	xpq_init();
   5.226  
   5.227  	msgbufinit(msgbufp, MSGBUF_SIZE);
   5.228  	/* XXX KMM I don't think we need call gates */
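
The machdep.c rework drops the map_range() helper in favour of direct xpq/PT_SET_MA calls: the self-referential PTD slot is installed first so vtomach()/vtopte() work, and pages handed to the hypervisor (GDT, LDT, SMP page table) are remapped read-only before the corresponding hypercall. A minimal sketch of that hand-off pattern, using names from the diff but with the error handling simplified:

    /* Sketch only: Xen will not accept descriptor-table frames that the guest
     * still maps writable, so drop PG_RW first, then register the machine frame. */
    PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW, TRUE);   /* make RO, flush */
    gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;                      /* machine frame  */
    if (HYPERVISOR_set_gdt(&gdtmachpfn, LAST_RESERVED_GDT_ENTRY + 1))
        panic("set_gdt failed");         /* sketch: the patch itself only XENPRINTFs */
    lgdt_finish();                                                /* reload selectors */

The LDT takes the same route (PT_SET_MA then xen_set_ldt()), which is why the older map_range()-based blocks could be deleted.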
     6.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c	Mon Mar 21 08:22:32 2005 +0000
     6.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c	Mon Mar 21 09:00:17 2005 +0000
     6.3 @@ -642,6 +642,7 @@ pmap_invalidate_page(pmap_t pmap, vm_off
     6.4  		mtx_unlock_spin(&smp_rv_mtx);
     6.5  	else
     6.6  		critical_exit();
     6.7 +	PT_UPDATES_FLUSH();
     6.8  }
     6.9  
    6.10  void
    6.11 @@ -681,6 +682,7 @@ pmap_invalidate_range(pmap_t pmap, vm_of
    6.12  		mtx_unlock_spin(&smp_rv_mtx);
    6.13  	else
    6.14  		critical_exit();
    6.15 +	PT_UPDATES_FLUSH();
    6.16  }
    6.17  
    6.18  void
    6.19 @@ -716,6 +718,7 @@ pmap_invalidate_all(pmap_t pmap)
    6.20  		mtx_unlock_spin(&smp_rv_mtx);
    6.21  	else
    6.22  		critical_exit();
    6.23 +	PT_UPDATES_FLUSH();
    6.24  }
    6.25  #else /* !SMP */
    6.26  /*
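
The pmap.c hunks add a PT_UPDATES_FLUSH() at the end of each invalidation path. Assuming PT_UPDATES_FLUSH() drains the batched update queues (as its use in machdep.c above suggests), the discipline being enforced is simply "queue, then flush before anything depends on the new mapping":

    /* Sketch only: batched PTE writes are invisible to Xen until flushed,
     * so an invalidation path must drain the queue before returning. */
    xpq_queue_pt_update(pte, 0);        /* illustrative: clear a mapping          */
    xpq_queue_invlpg(va);               /* queue the TLB invalidation             */
    PT_UPDATES_FLUSH();                 /* push the whole batch to the hypervisor */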
     7.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/vm_machdep.c	Mon Mar 21 08:22:32 2005 +0000
     7.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/vm_machdep.c	Mon Mar 21 09:00:17 2005 +0000
     7.3 @@ -94,12 +94,13 @@
     7.4  #endif
     7.5  
     7.6  #include <machine/xenfunc.h>
     7.7 -
     7.8 +#if 0
     7.9  #ifdef SMP
    7.10  static void	cpu_reset_proxy(void);
    7.11  static u_int	cpu_reset_proxyid;
    7.12  static volatile u_int	cpu_reset_proxy_active;
    7.13  #endif
    7.14 +#endif
    7.15  static void	sf_buf_init(void *arg);
    7.16  SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
    7.17  
    7.18 @@ -462,6 +463,7 @@ kvtop(void *addr)
    7.19   * Force reset the processor by invalidating the entire address space!
    7.20   */
    7.21  
    7.22 +#if 0
    7.23  #ifdef SMP
    7.24  static void
    7.25  cpu_reset_proxy()
    7.26 @@ -473,10 +475,10 @@ cpu_reset_proxy()
    7.27  	stop_cpus((1<<cpu_reset_proxyid));
    7.28  	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
    7.29  	DELAY(1000000);
    7.30 -	cpu_reset_real();
    7.31 +	cpu_reset();
    7.32  }
    7.33  #endif
    7.34 -
    7.35 +#endif
    7.36  void
    7.37  cpu_reset()
    7.38  {
     8.1 --- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c	Mon Mar 21 08:22:32 2005 +0000
     8.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c	Mon Mar 21 09:00:17 2005 +0000
     8.3 @@ -381,156 +381,152 @@ printk(const char *fmt, ...)
     8.4          (void)HYPERVISOR_console_write(buf, ret);
     8.5  }
     8.6  
     8.7 -#define XPQUEUE_SIZE 2048
     8.8 +#define XPQUEUE_SIZE 128
     8.9  
    8.10 -typedef struct xpq_queue {
    8.11 -    uint32_t ptr; 
    8.12 -    uint32_t val;
    8.13 -} xpq_queue_t;
    8.14 +#define MCLQUEUE_SIZE 32
    8.15 +#ifdef SMP
    8.16 +/* per-cpu queues and indices */
    8.17 +static multicall_entry_t mcl_queue[MAX_VIRT_CPUS][MCLQUEUE_SIZE];
    8.18 +static mmu_update_t xpq_queue[MAX_VIRT_CPUS][XPQUEUE_SIZE];
    8.19 +static int mcl_idx[MAX_VIRT_CPUS];  
    8.20 +static int xpq_idx[MAX_VIRT_CPUS];  
    8.21  
    8.22 -#define MCLQUEUE_SIZE 512
    8.23 +#define MCL_QUEUE mcl_queue[vcpu]
    8.24 +#define XPQ_QUEUE xpq_queue[vcpu]
    8.25 +#define MCL_IDX mcl_idx[vcpu]
    8.26 +#define XPQ_IDX xpq_idx[vcpu]
    8.27 +#define SET_VCPU() int vcpu = smp_processor_id()
    8.28 +#else
    8.29  static multicall_entry_t mcl_queue[MCLQUEUE_SIZE];
    8.30 +static mmu_update_t xpq_queue[XPQUEUE_SIZE];
    8.31  static int mcl_idx = 0;
    8.32 -
    8.33 -static xpq_queue_t xpq_queue[XPQUEUE_SIZE];
    8.34 -static boolean_t xpq_initialized;
    8.35 -static struct mtx update_lock;
    8.36  static int xpq_idx = 0;
    8.37  
    8.38 -/*
    8.39 - * Don't attempt to lock until after lock & memory initialization
    8.40 - */
    8.41 -#define XPQ_LOCK(lock, flags)		\
    8.42 -	if (likely(xpq_initialized))	\
    8.43 -    		mtx_lock_irqsave(lock, flags)
    8.44 -#define XPQ_UNLOCK(lock, flags)		\
    8.45 -	if (likely(xpq_initialized))	\
    8.46 -    		mtx_unlock_irqrestore(lock, flags)
    8.47 +#define MCL_QUEUE mcl_queue
    8.48 +#define XPQ_QUEUE xpq_queue
    8.49 +#define MCL_IDX mcl_idx
    8.50 +#define XPQ_IDX xpq_idx
    8.51 +#define SET_VCPU()
    8.52 +#endif
    8.53 +#define XPQ_IDX_INC atomic_add_int(&XPQ_IDX, 1);
    8.54 +#define MCL_IDX_INC atomic_add_int(&MCL_IDX, 1);
    8.55  
    8.56 -void 
    8.57 -xpq_init(void)
    8.58 -{
    8.59 -    xpq_initialized = TRUE;
    8.60 -    mtx_init(&update_lock, "mmu", "MMU LOCK", MTX_SPIN);
    8.61 -}
    8.62  
    8.63  static __inline void
    8.64  _xpq_flush_queue(void)
    8.65  {
    8.66 -    	int _xpq_idx = xpq_idx;
    8.67 -	int error, i;
    8.68 +    SET_VCPU();
    8.69 +    int _xpq_idx = XPQ_IDX;
    8.70 +    int error, i;
    8.71 +    /* window of vulnerability here? */
    8.72  
    8.73 -	xpq_idx = 0;
    8.74 -	/* Make sure index is cleared first to avoid double updates. */
    8.75 -	error = HYPERVISOR_mmu_update((mmu_update_t *)xpq_queue, _xpq_idx, 
    8.76 -				       NULL);
    8.77 -	
    8.78 -    	if (__predict_false(error < 0)) {
    8.79 -	    for (i = 0; i < _xpq_idx; i++)
    8.80 -		printk("val: %x ptr: %p\n", xpq_queue[i].val, xpq_queue[i].ptr);
    8.81 -	    panic("Failed to execute MMU updates: %d", error);
    8.82 -	}
    8.83 +    XPQ_IDX = 0;
    8.84 +    /* Make sure index is cleared first to avoid double updates. */
    8.85 +    error = HYPERVISOR_mmu_update((mmu_update_t *)&XPQ_QUEUE,
    8.86 +				  _xpq_idx, NULL);
    8.87 +    
    8.88 +    if (__predict_false(error < 0)) {
    8.89 +	for (i = 0; i < _xpq_idx; i++)
    8.90 +	    printk("val: %x ptr: %p\n", XPQ_QUEUE[i].val, XPQ_QUEUE[i].ptr);
    8.91 +	panic("Failed to execute MMU updates: %d", error);
    8.92 +    }
    8.93  
    8.94  }
    8.95  static void
    8.96  xpq_flush_queue(void)
    8.97  {
    8.98 -	unsigned long flags = 0;
    8.99 +    SET_VCPU();
   8.100  
   8.101 -	XPQ_LOCK(&update_lock, flags);
   8.102 -	if (xpq_idx != 0) _xpq_flush_queue();
   8.103 -	XPQ_UNLOCK(&update_lock, flags);
   8.104 +    if (XPQ_IDX != 0) _xpq_flush_queue();
   8.105  }
   8.106  
   8.107  static __inline void
   8.108  _mcl_flush_queue(void)
   8.109  {
   8.110 -    	int _mcl_idx = mcl_idx;
   8.111 -	mcl_idx = 0;
   8.112 -	(void)HYPERVISOR_multicall(mcl_queue, _mcl_idx);
   8.113 +    SET_VCPU();
   8.114 +    int _mcl_idx = MCL_IDX;
   8.115 +
   8.116 +    MCL_IDX = 0;
   8.117 +    (void)HYPERVISOR_multicall(&MCL_QUEUE, _mcl_idx);
   8.118  }
   8.119  
   8.120  void
   8.121  mcl_flush_queue(void)
   8.122  {
   8.123 -	unsigned long flags = 0;
   8.124 -
   8.125 -	XPQ_LOCK(&update_lock, flags);
   8.126 -	if (__predict_true(mcl_idx != 0)) _mcl_flush_queue();
   8.127 -	XPQ_UNLOCK(&update_lock, flags);
   8.128 -	/* XXX: until we can remove the  pervasive 
   8.129 -	 * __HYPERVISOR_update_va_mapping calls, we have 2 queues.  In order
   8.130 -	 * to ensure that they never get out of sync, only 1 flush interface
   8.131 -	 * is provided.
   8.132 -	 */
   8.133 -	xpq_flush_queue();
   8.134 +    
   8.135 +    if (__predict_true(mcl_idx != 0)) _mcl_flush_queue();
   8.136 +    /* XXX: until we can remove the  pervasive 
   8.137 +     * __HYPERVISOR_update_va_mapping calls, we have 2 queues.  In order
   8.138 +     * to ensure that they never get out of sync, only 1 flush interface
   8.139 +     * is provided.
   8.140 +     */
   8.141 +    xpq_flush_queue();
   8.142  }
   8.143  
   8.144  
   8.145  static __inline void
   8.146  xpq_increment_idx(void)
   8.147  {
   8.148 -    xpq_idx++;
   8.149 -    if (__predict_false(xpq_idx == XPQUEUE_SIZE))
   8.150 +    SET_VCPU();
   8.151 +
   8.152 +    XPQ_IDX++;
   8.153 +    if (__predict_false(XPQ_IDX == XPQUEUE_SIZE))
   8.154  	xpq_flush_queue();
   8.155  }
   8.156  
   8.157  static __inline void
   8.158  mcl_increment_idx(void)
   8.159  {
   8.160 -    mcl_idx++;
   8.161 -    if (__predict_false(mcl_idx == MCLQUEUE_SIZE))
   8.162 +    SET_VCPU();
   8.163 +    MCL_IDX++;
   8.164 +
   8.165 +    if (__predict_false(MCL_IDX == MCLQUEUE_SIZE))
   8.166  	mcl_flush_queue();
   8.167  }
   8.168  
   8.169  void
   8.170  xpq_queue_invlpg(vm_offset_t va)
   8.171  {
   8.172 -	unsigned long flags = 0;
   8.173 -
   8.174 -	XPQ_LOCK(&update_lock, flags);
   8.175 -	xpq_queue[xpq_idx].ptr = (va & ~PAGE_MASK) | MMU_EXTENDED_COMMAND;
   8.176 -	xpq_queue[xpq_idx].val = MMUEXT_INVLPG;
   8.177 -	xpq_increment_idx();
   8.178 -	XPQ_UNLOCK(&update_lock, flags);
   8.179 +    SET_VCPU();
   8.180 +    
   8.181 +    XPQ_QUEUE[XPQ_IDX].ptr = (va & ~PAGE_MASK) | MMU_EXTENDED_COMMAND;
   8.182 +    XPQ_QUEUE[XPQ_IDX].val = MMUEXT_INVLPG;
   8.183 +    xpq_increment_idx();
   8.184  }
   8.185  
   8.186  void
   8.187  load_cr3(uint32_t val)
   8.188  {
   8.189 -	xpq_queue_pt_switch(val);
   8.190 -	xpq_flush_queue();
   8.191 +    xpq_queue_pt_switch(val);
   8.192 +    xpq_flush_queue();
   8.193  }
   8.194  
   8.195  void
   8.196  xen_set_ldt(vm_offset_t base, uint32_t entries)
   8.197  {
   8.198 -	xpq_queue_set_ldt(base, entries);
   8.199 -	_xpq_flush_queue();
   8.200 +    xpq_queue_set_ldt(base, entries);
   8.201 +    _xpq_flush_queue();
   8.202  }
   8.203  
   8.204  void
   8.205  xen_machphys_update(unsigned long mfn, unsigned long pfn)
   8.206  {
   8.207 -    	unsigned long flags = 0;
   8.208 -	XPQ_LOCK(&update_lock, flags);
   8.209 -	xpq_queue[xpq_idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   8.210 -	xpq_queue[xpq_idx].val = pfn;
   8.211 -	xpq_increment_idx();
   8.212 -	_xpq_flush_queue();
   8.213 -	XPQ_UNLOCK(&update_lock, flags);
   8.214 +    SET_VCPU();
   8.215 +    
   8.216 +    XPQ_QUEUE[XPQ_IDX].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   8.217 +    XPQ_QUEUE[XPQ_IDX].val = pfn;
   8.218 +    xpq_increment_idx();
   8.219 +    _xpq_flush_queue();
   8.220  }
   8.221  
   8.222  void
   8.223  xpq_queue_pt_update(pt_entry_t *ptr, pt_entry_t val)
   8.224  {
   8.225 -	unsigned long flags = 0;
   8.226 -
   8.227 -	XPQ_LOCK(&update_lock, flags);
   8.228 -    	xpq_queue[xpq_idx].ptr = (uint32_t)ptr;
   8.229 -    	xpq_queue[xpq_idx].val = val;
   8.230 -    	xpq_increment_idx();
   8.231 -	XPQ_UNLOCK(&update_lock, flags);
   8.232 +    SET_VCPU();
   8.233 +    
   8.234 +    XPQ_QUEUE[XPQ_IDX].ptr = (memory_t)ptr;
   8.235 +    XPQ_QUEUE[XPQ_IDX].val = (memory_t)val;
   8.236 +    xpq_increment_idx();
   8.237  }
   8.238  
   8.239  void 
   8.240 @@ -539,14 +535,13 @@ mcl_queue_pt_update(vm_offset_t va, vm_p
   8.241  #if 0
   8.242      printf("setting va %x to ma %x\n", va, ma); 
   8.243  #endif
   8.244 -        unsigned long flags = 0;
   8.245 -        XPQ_LOCK(&update_lock, flags);
   8.246 -	mcl_queue[mcl_idx].op = __HYPERVISOR_update_va_mapping;
   8.247 -	mcl_queue[mcl_idx].args[0] = (unsigned long)(va >> PAGE_SHIFT);
   8.248 -	mcl_queue[mcl_idx].args[1] = (unsigned long)ma;
   8.249 -	mcl_queue[mcl_idx].args[2] = 0;
   8.250 -    	mcl_increment_idx();
   8.251 -	XPQ_UNLOCK(&update_lock, flags);
   8.252 +    SET_VCPU();
   8.253 +    
   8.254 +    MCL_QUEUE[MCL_IDX].op = __HYPERVISOR_update_va_mapping;
   8.255 +    MCL_QUEUE[MCL_IDX].args[0] = (unsigned long)va;
   8.256 +    MCL_QUEUE[MCL_IDX].args[1] = (unsigned long)ma;
   8.257 +    MCL_QUEUE[MCL_IDX].args[2] = UVMF_INVLPG;
   8.258 +    mcl_increment_idx();
   8.259  }
   8.260  
   8.261  
   8.262 @@ -554,72 +549,63 @@ mcl_queue_pt_update(vm_offset_t va, vm_p
   8.263  void
   8.264  xpq_queue_pt_switch(uint32_t val)
   8.265  {
   8.266 -	unsigned long flags = 0;
   8.267 -	vm_paddr_t ma = xpmap_ptom(val) & PG_FRAME;
   8.268 -
   8.269 -	XPQ_LOCK(&update_lock, flags);
   8.270 -	xpq_queue[xpq_idx].ptr = ma | MMU_EXTENDED_COMMAND;
   8.271 -	xpq_queue[xpq_idx].val = MMUEXT_NEW_BASEPTR;
   8.272 -	xpq_increment_idx();
   8.273 -	XPQ_UNLOCK(&update_lock, flags);
   8.274 +    vm_paddr_t ma = xpmap_ptom(val) & PG_FRAME;
   8.275 +    SET_VCPU();
   8.276 +    
   8.277 +    XPQ_QUEUE[XPQ_IDX].ptr = ma | MMU_EXTENDED_COMMAND;
   8.278 +    XPQ_QUEUE[XPQ_IDX].val = MMUEXT_NEW_BASEPTR;
   8.279 +    xpq_increment_idx();
   8.280  }
   8.281  
   8.282  
   8.283  void
   8.284  xpq_queue_pin_table(uint32_t pa, int type)
   8.285  {
   8.286 -	unsigned long flags = 0;
   8.287 -	XPQ_LOCK(&update_lock, flags);
   8.288 -	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
   8.289 -	switch (type) {
   8.290 -	case XPQ_PIN_L1_TABLE:
   8.291 -		xpq_queue[xpq_idx].val = MMUEXT_PIN_L1_TABLE;
   8.292 -		break;
   8.293 -	case XPQ_PIN_L2_TABLE:
   8.294 -		xpq_queue[xpq_idx].val = MMUEXT_PIN_L2_TABLE;
   8.295 -		break;
   8.296 -	}
   8.297 -	xpq_increment_idx();
   8.298 -	XPQ_UNLOCK(&update_lock, flags);
   8.299 +    SET_VCPU();
   8.300 +    
   8.301 +    
   8.302 +    XPQ_QUEUE[XPQ_IDX].ptr = pa | MMU_EXTENDED_COMMAND;
   8.303 +    switch (type) {
   8.304 +    case XPQ_PIN_L1_TABLE:
   8.305 +	XPQ_QUEUE[XPQ_IDX].val = MMUEXT_PIN_L1_TABLE;
   8.306 +	break;
   8.307 +    case XPQ_PIN_L2_TABLE:
   8.308 +	XPQ_QUEUE[XPQ_IDX].val = MMUEXT_PIN_L2_TABLE;
   8.309 +	break;
   8.310 +    }
   8.311 +    xpq_increment_idx();
   8.312  }
   8.313  
   8.314  void
   8.315  xpq_queue_unpin_table(uint32_t pa)
   8.316  {
   8.317 -	unsigned long flags = 0;
   8.318 -
   8.319 -	XPQ_LOCK(&update_lock, flags);
   8.320 -	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
   8.321 -	xpq_queue[xpq_idx].val = MMUEXT_UNPIN_TABLE;
   8.322 -	xpq_increment_idx();
   8.323 -	XPQ_UNLOCK(&update_lock, flags);
   8.324 +    SET_VCPU();
   8.325 +    
   8.326 +    XPQ_QUEUE[XPQ_IDX].ptr = pa | MMU_EXTENDED_COMMAND;
   8.327 +    XPQ_QUEUE[XPQ_IDX].val = MMUEXT_UNPIN_TABLE;
   8.328 +    xpq_increment_idx();
   8.329  }
   8.330  
   8.331  void
   8.332  xpq_queue_set_ldt(vm_offset_t va, uint32_t entries)
   8.333  {
   8.334 -	unsigned long flags = 0;
   8.335 -
   8.336 -	XPQ_LOCK(&update_lock, flags);
   8.337 -	KASSERT(va == (va & PG_FRAME), ("ldt not page aligned"));
   8.338 -	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND | va;
   8.339 -	xpq_queue[xpq_idx].val = MMUEXT_SET_LDT |
   8.340 -		(entries << MMUEXT_CMD_SHIFT);
   8.341 -	xpq_increment_idx();
   8.342 -	XPQ_UNLOCK(&update_lock, flags);
   8.343 +    SET_VCPU();
   8.344 +    
   8.345 +    KASSERT(va == (va & PG_FRAME), ("ldt not page aligned"));
   8.346 +    XPQ_QUEUE[XPQ_IDX].ptr = MMU_EXTENDED_COMMAND | va;
   8.347 +    XPQ_QUEUE[XPQ_IDX].val = MMUEXT_SET_LDT |
   8.348 +	(entries << MMUEXT_CMD_SHIFT);
   8.349 +    xpq_increment_idx();
   8.350  }
   8.351  
   8.352  void
   8.353  xpq_queue_tlb_flush()
   8.354  {
   8.355 -	unsigned long flags = 0;
   8.356 -
   8.357 -	XPQ_LOCK(&update_lock, flags);
   8.358 -
   8.359 -	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
   8.360 -	xpq_queue[xpq_idx].val = MMUEXT_TLB_FLUSH;
   8.361 -	xpq_increment_idx();
   8.362 -	XPQ_UNLOCK(&update_lock, flags);
   8.363 +    SET_VCPU();
   8.364 +    
   8.365 +    XPQ_QUEUE[XPQ_IDX].ptr = MMU_EXTENDED_COMMAND;
   8.366 +    XPQ_QUEUE[XPQ_IDX].val = MMUEXT_TLB_FLUSH;
   8.367 +    xpq_increment_idx();
   8.368  }
   8.369  
   8.370  
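
The xen_machdep.c rewrite replaces the single spin-locked update queue with per-vcpu queues: SET_VCPU() captures smp_processor_id() once, the XPQ_QUEUE/MCL_QUEUE macros then select that CPU's arrays, and the old XPQ_LOCK/XPQ_UNLOCK pairs disappear. Callers keep the same batch-then-flush usage; a minimal sketch under those assumptions ('pte'/'newval' are illustrative):

    /* Sketch only: queue a couple of PTE updates, then drain the queues. */
    xpq_queue_pt_update(pte, newval);    /* batched; not yet visible to Xen      */
    xpq_queue_pt_update(pte2, newval2);
    mcl_flush_queue();                   /* drains the multicall queue and, per the
                                            comment above, the MMU queue with it  */

The trade-off, hinted at by the "window of vulnerability here?" comment, is that correctness now relies on a thread not migrating between SET_VCPU() and the flush, rather than on the removed update_lock.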
     9.1 --- a/freebsd-5.3-xen-sparse/i386-xen/include/evtchn.h	Mon Mar 21 08:22:32 2005 +0000
     9.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/include/evtchn.h	Mon Mar 21 09:00:17 2005 +0000
     9.3 @@ -9,11 +9,28 @@
     9.4  
     9.5  #ifndef __ASM_EVTCHN_H__
     9.6  #define __ASM_EVTCHN_H__
     9.7 -
     9.8 +#include <machine/pcpu.h>
     9.9  #include <machine/hypervisor.h>
    9.10  #include <machine/synch_bitops.h>
    9.11  #include <machine/hypervisor-ifs.h>
    9.12  
    9.13 +#ifdef SMP
    9.14 +#include <sys/param.h> /* XXX for time.h */
    9.15 +#include <sys/time.h> /* XXX for pcpu.h */
    9.16 +#include <sys/pcpu.h> /* XXX for PCPU_GET */
    9.17 +extern int gdt_set;
    9.18 +static inline int 
    9.19 +smp_processor_id(void)  
    9.20 +{
    9.21 +    if (likely(gdt_set))
    9.22 +	return PCPU_GET(cpuid);
    9.23 +    return 0;
    9.24 +}
    9.25 +
    9.26 +#else
    9.27 +#define smp_processor_id() 0
    9.28 +#endif
    9.29 +
    9.30  /*
    9.31   * LOW-LEVEL DEFINITIONS
    9.32   */
    9.33 @@ -38,6 +55,7 @@ static inline void
    9.34  unmask_evtchn(int port)
    9.35  {
    9.36      shared_info_t *s = HYPERVISOR_shared_info;
    9.37 +    vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
    9.38  
    9.39      synch_clear_bit(port, &s->evtchn_mask[0]);
    9.40  
    9.41 @@ -46,7 +64,7 @@ unmask_evtchn(int port)
    9.42       * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
    9.43       */
    9.44      if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
    9.45 -         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
    9.46 +         !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel) )
    9.47      {
    9.48          s->vcpu_data[0].evtchn_upcall_pending = 1;
    9.49          if ( !s->vcpu_data[0].evtchn_upcall_mask )
    10.1 --- a/freebsd-5.3-xen-sparse/i386-xen/include/pmap.h	Mon Mar 21 08:22:32 2005 +0000
    10.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/include/pmap.h	Mon Mar 21 09:00:17 2005 +0000
    10.3 @@ -149,8 +149,8 @@
    10.4   */
    10.5  
    10.6  #ifdef SMP
    10.7 -#define MPPTDI	(NPDEPTD-1)	  	  /* per cpu ptd entry */
    10.8 -#define	KPTDI 	(MPPTDI-NKPDE-XEN_PAGES	  /* start of kernel virtual pde's */
    10.9 +#define MPPTDI	(NPDEPTD-1-XEN_PAGES)	  	  /* per cpu ptd entry */
   10.10 +#define	KPTDI 	(MPPTDI-NKPDE)  /* start of kernel virtual pde's */
   10.11  #else
   10.12  #define	KPTDI	(NPDEPTD-NKPDE-XEN_PAGES) /* start of kernel virtual pde's */
   10.13  #endif	/* SMP */
    11.1 --- a/freebsd-5.3-xen-sparse/i386-xen/include/vmparam.h	Mon Mar 21 08:22:32 2005 +0000
    11.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/include/vmparam.h	Mon Mar 21 09:00:17 2005 +0000
    11.3 @@ -105,7 +105,7 @@
    11.4  #define UPT_MAX_ADDRESS		VADDR(PTDPTDI, PTDPTDI)
    11.5  #define UPT_MIN_ADDRESS		VADDR(PTDPTDI, 0)
    11.6  
    11.7 -#define VM_MAXUSER_ADDRESS	VADDR(PTDPTDI-1, 0)
    11.8 +#define VM_MAXUSER_ADDRESS	VADDR(PTDPTDI, 0)
    11.9  
   11.10  #define USRSTACK		VM_MAXUSER_ADDRESS
   11.11  
    12.1 --- a/freebsd-5.3-xen-sparse/i386-xen/include/xen-os.h	Mon Mar 21 08:22:32 2005 +0000
    12.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/include/xen-os.h	Mon Mar 21 09:00:17 2005 +0000
    12.3 @@ -6,6 +6,7 @@
    12.4  
    12.5  #ifndef _OS_H_
    12.6  #define _OS_H_
    12.7 +#include <machine/param.h>
    12.8  
    12.9  #ifndef NULL
   12.10  #define NULL (void *)0
   12.11 @@ -58,6 +59,11 @@ void printk(const char *fmt, ...);
   12.12  /* some function prototypes */
   12.13  void trap_init(void);
   12.14  
   12.15 +extern int preemptable;
   12.16 +#define preempt_disable() (preemptable = 0)
   12.17 +#define preempt_enable() (preemptable = 1)
   12.18 +#define preempt_enable_no_resched() (preemptable = 1)
   12.19 +
   12.20  
   12.21  /*
   12.22   * STI/CLI equivalents. These basically set and clear the virtual
   12.23 @@ -68,70 +74,74 @@ void trap_init(void);
   12.24  #define likely(x)  __builtin_expect((x),1)
   12.25  #define unlikely(x)  __builtin_expect((x),0)
   12.26  
   12.27 -#define __cli()                                                               \
   12.28 -do {                                                                          \
   12.29 -    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
   12.30 -    barrier();                                                                \
   12.31 +
   12.32 +
   12.33 +#define __cli()                                                         \
   12.34 +do {                                                                    \
   12.35 +        vcpu_info_t *_vcpu;                                             \
   12.36 +        preempt_disable();                                              \
   12.37 +        _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
   12.38 +        _vcpu->evtchn_upcall_mask = 1;                                  \
   12.39 +        preempt_enable_no_resched();                                    \
   12.40 +        barrier();                                                      \
   12.41  } while (0)
   12.42  
   12.43 -#define __sti()                                                               \
   12.44 -do {                                                                          \
   12.45 -    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
   12.46 -    barrier();                                                                \
   12.47 -    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
   12.48 -    barrier(); /* unmask then check (avoid races) */                          \
   12.49 -    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
   12.50 -        force_evtchn_callback();                                              \
   12.51 +#define __sti()                                                         \
   12.52 +do {                                                                    \
   12.53 +        vcpu_info_t *_vcpu;                                             \
   12.54 +        barrier();                                                      \
   12.55 +        preempt_disable();                                              \
   12.56 +        _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
   12.57 +        _vcpu->evtchn_upcall_mask = 0;                                  \
   12.58 +        barrier(); /* unmask then check (avoid races) */                \
   12.59 +        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
   12.60 +                force_evtchn_callback();                                \
   12.61 +        preempt_enable();                                               \
   12.62  } while (0)
   12.63  
   12.64 +
   12.65  #define __save_flags(x)                                                       \
   12.66  do {                                                                          \
   12.67 -    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
   12.68 -} while (0)
   12.69 -
   12.70 -#define __restore_flags(x)                                                    \
   12.71 -do {                                                                          \
   12.72 -    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
   12.73 -    barrier();                                                                \
   12.74 -    if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) {            \
   12.75 -        barrier(); /* unmask then check (avoid races) */                      \
   12.76 -        if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )          \
   12.77 -            force_evtchn_callback();                                          \
   12.78 -    }                                                                         \
   12.79 +    vcpu_info_t *vcpu;                                                        \
   12.80 +    vcpu = HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];             \
   12.81 +    (x) = _vcpu->evtchn_upcall_mask;                                          \
   12.82  } while (0)
   12.83  
   12.84 -#define __save_and_cli(x)                                                     \
   12.85 -do {                                                                          \
   12.86 -    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
   12.87 -    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
   12.88 -    barrier();                                                                \
   12.89 +#define __restore_flags(x)                                              \
   12.90 +do {                                                                    \
   12.91 +        vcpu_info_t *_vcpu;                                             \
   12.92 +        barrier();                                                      \
   12.93 +        preempt_disable();                                              \
   12.94 +        _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
   12.95 +        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
   12.96 +                barrier(); /* unmask then check (avoid races) */        \
   12.97 +                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
   12.98 +                        force_evtchn_callback();                        \
   12.99 +                preempt_enable();                                       \
  12.100 +        } else                                                          \
  12.101 +                preempt_enable_no_resched();                            \
  12.102  } while (0)
  12.103  
  12.104 -#define __save_and_sti(x)                                                     \
  12.105 -do {                                                                          \
  12.106 -    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
  12.107 -    barrier();                                                                \
  12.108 -    (x) = _shared->vcpu_data[0].evtchn_upcall_mask;                           \
  12.109 -    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
  12.110 -    barrier(); /* unmask then check (avoid races) */                          \
  12.111 -    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
  12.112 -        force_evtchn_callback();                                              \
  12.113 +
  12.114 +#define __save_and_cli(x)                                               \
  12.115 +do {                                                                    \
  12.116 +        vcpu_info_t *_vcpu;                                             \
  12.117 +        preempt_disable();                                              \
  12.118 +        _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
  12.119 +        (x) = _vcpu->evtchn_upcall_mask;                                \
  12.120 +        _vcpu->evtchn_upcall_mask = 1;                                  \
  12.121 +        preempt_enable_no_resched();                                    \
  12.122 +        barrier();                                                      \
  12.123  } while (0)
  12.124  
  12.125 -#ifdef SMP
  12.126 -/* extra macros need for the SMP case */
  12.127 -#error "global_irq_* not defined"
  12.128 -#endif
  12.129  
  12.130  #define cli() __cli()
  12.131  #define sti() __sti()
  12.132  #define save_flags(x) __save_flags(x)
  12.133  #define restore_flags(x) __restore_flags(x)
  12.134  #define save_and_cli(x) __save_and_cli(x)
  12.135 -#define save_and_sti(x) __save_and_sti(x)
  12.136  
  12.137  #define local_irq_save(x)       __save_and_cli(x)
  12.138 -#define local_irq_set(x)        __save_and_sti(x)
  12.139  #define local_irq_restore(x)    __restore_flags(x)
  12.140  #define local_irq_disable()     __cli()
  12.141  #define local_irq_enable()      __sti()
  12.142 @@ -141,9 +151,20 @@ do {                                    
  12.143  
  12.144  #define mb()
  12.145  #define rmb()
  12.146 -#define smp_mb() 
  12.147  #define wmb()
  12.148 -
  12.149 +#ifdef SMP
  12.150 +#define smp_mb() mb() 
  12.151 +#define smp_rmb() rmb()
  12.152 +#define smp_wmb() wmb()
  12.153 +#define smp_read_barrier_depends()      read_barrier_depends()
  12.154 +#define set_mb(var, value) do { xchg(&var, value); } while (0)
  12.155 +#else
  12.156 +#define smp_mb()        barrier()
  12.157 +#define smp_rmb()       barrier()
  12.158 +#define smp_wmb()       barrier()
  12.159 +#define smp_read_barrier_depends()      do { } while(0)
  12.160 +#define set_mb(var, value) do { var = value; barrier(); } while (0)
  12.161 +#endif
  12.162  
  12.163  
  12.164  /* This is a barrier for the compiler only, NOT the processor! */
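
Usage sketch (illustrative, not from the tree): the reworked per-vcpu flag macros above keep the usual save/restore discipline. A minimal critical section built only from the macros defined in this header; example_critical_section is a hypothetical name:

    /* Hypothetical example: exclude the event-channel upcall while touching
     * state that the upcall handler also modifies. */
    static void example_critical_section(void)
    {
        unsigned long flags;

        local_irq_save(flags);      /* __save_and_cli: record the old mask, set it */
        /* ... manipulate state shared with the upcall handler ... */
        local_irq_restore(flags);   /* __restore_flags: clear the mask and, if an
                                     * upcall became pending meanwhile, deliver it
                                     * via force_evtchn_callback() */
    }
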
    13.1 --- a/freebsd-5.3-xen-sparse/i386-xen/xen/blkfront/xb_blkfront.c	Mon Mar 21 08:22:32 2005 +0000
    13.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/xen/blkfront/xb_blkfront.c	Mon Mar 21 09:00:17 2005 +0000
    13.3 @@ -100,8 +100,10 @@ static unsigned int blkif_irq;
    13.4  static int blkif_control_rsp_valid;
    13.5  static blkif_response_t blkif_control_rsp;
    13.6  
    13.7 -static unsigned long xb_rec_ring_free;		
    13.8 -blkif_request_t xb_rec_ring[BLKIF_RING_SIZE];	/* shadow recovery ring */
    13.9 +static blkif_front_ring_t   blk_ring;
   13.10 +
   13.11 +static unsigned long rec_ring_free;		
   13.12 +blkif_request_t rec_ring[RING_SIZE(&blk_ring)];	/* shadow recovery ring */
   13.13  
   13.14  /* XXX move to xb_vbd.c when VBD update support is added */
   13.15  #define MAX_VBDS 64
   13.16 @@ -115,16 +117,10 @@ static unsigned int xb_kick_pending;
   13.17  
   13.18  static struct mtx blkif_io_lock;
   13.19  
   13.20 -static blkif_ring_t   *xb_blk_ring;
   13.21 -static BLKIF_RING_IDX xb_resp_cons; /* Response consumer for comms ring. */
   13.22 -static BLKIF_RING_IDX xb_req_prod;  /* Private request producer */
   13.23  
   13.24  static int xb_recovery = 0;           /* "Recovery in progress" flag.  Protected
   13.25                                         * by the blkif_io_lock */
   13.26  
   13.27 -/* We plug the I/O ring if the driver is suspended or if the ring is full. */
   13.28 -#define BLKIF_RING_FULL (((xb_req_prod - xb_resp_cons) == BLKIF_RING_SIZE) || \
   13.29 -                         (blkif_state != BLKIF_STATE_CONNECTED))
   13.30  
   13.31  void blkif_completion(blkif_request_t *req);
   13.32  void xb_response_intr(void *);
   13.33 @@ -135,13 +131,13 @@ void xb_response_intr(void *);
   13.34  static inline int 
   13.35  GET_ID_FROM_FREELIST( void )
   13.36  {
   13.37 -    unsigned long free = xb_rec_ring_free;
   13.38 +    unsigned long free = rec_ring_free;
   13.39  
   13.40 -    KASSERT(free <= BLKIF_RING_SIZE, ("free %lu > BLKIF_RING_SIZE", free));
   13.41 +    KASSERT(free <= RING_SIZE(&blk_ring), ("free %lu > RING_SIZE", free));
   13.42  
   13.43 -    xb_rec_ring_free = xb_rec_ring[free].id;
   13.44 +    rec_ring_free = rec_ring[free].id;
   13.45  
   13.46 -    xb_rec_ring[free].id = 0x0fffffee; /* debug */
   13.47 +    rec_ring[free].id = 0x0fffffee; /* debug */
   13.48  
   13.49      return free;
   13.50  }
   13.51 @@ -149,8 +145,8 @@ GET_ID_FROM_FREELIST( void )
   13.52  static inline void 
   13.53  ADD_ID_TO_FREELIST( unsigned long id )
   13.54  {
   13.55 -    xb_rec_ring[id].id = xb_rec_ring_free;
   13.56 -    xb_rec_ring_free = id;
   13.57 +    rec_ring[id].id = rec_ring_free;
   13.58 +    rec_ring_free = id;
   13.59  }
   13.60  
   13.61  static inline void translate_req_to_pfn(blkif_request_t *xreq,
   13.62 @@ -188,7 +184,7 @@ static inline void translate_req_to_mfn(
   13.63  
   13.64  static inline void flush_requests(void)
   13.65  {
   13.66 -    xb_blk_ring->req_prod = xb_req_prod;
   13.67 +    RING_PUSH_REQUESTS(&blk_ring);
   13.68      notify_via_evtchn(blkif_evtchn);
   13.69  }
   13.70  
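
Sketch (illustrative, not from the tree): the rec_ring[] shadow introduced above doubles as its own free list; a free slot's id field holds the index of the next free slot, while an in-use slot's id holds the bio pointer needed at completion time. A minimal allocation pattern; alloc_shadow_slot is a hypothetical helper, the other identifiers are those declared above:

    /* Hypothetical helper: reserve a shadow slot for an in-flight request. */
    static inline int alloc_shadow_slot(struct bio *bp)
    {
        int id = GET_ID_FROM_FREELIST();      /* pop the free-list head       */
        rec_ring[id].id = (unsigned long)bp;  /* stash the bio for completion */
        return id;
    }
    /* Completion path: bp = (struct bio *)rec_ring[id].id; ADD_ID_TO_FREELIST(id); */
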
   13.71 @@ -207,12 +203,9 @@ xb_response_intr(void *xsc)
   13.72      struct xb_softc *sc = NULL;
   13.73      struct bio *bp;
   13.74      blkif_response_t *bret;
   13.75 -    BLKIF_RING_IDX i, rp; 
   13.76 +    RING_IDX i, rp; 
   13.77      unsigned long flags;
   13.78      
   13.79 -    if (blkif_state == BLKIF_STATE_CLOSED)
   13.80 -	return;
   13.81 -
   13.82      mtx_lock_irqsave(&blkif_io_lock, flags);
   13.83  
   13.84      if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) || 
   13.85 @@ -221,20 +214,20 @@ xb_response_intr(void *xsc)
   13.86          return;
   13.87      }
   13.88  
   13.89 -    rp = xb_blk_ring->resp_prod;
   13.90 +    rp = blk_ring.sring->rsp_prod;
   13.91      rmb(); /* Ensure we see queued responses up to 'rp'. */
   13.92  
   13.93      /* sometimes we seem to lose i/o.  stay in the interrupt handler while
   13.94       * there is stuff to process: continually recheck the response producer.
   13.95       */
   13.96 -    for ( i = xb_resp_cons; i != (rp = xb_blk_ring->resp_prod); i++ ) {
   13.97 +    for ( i = blk_ring.rsp_cons; i != (rp = blk_ring.sring->rsp_prod); i++ ) {
   13.98  	unsigned long id;
   13.99 -        bret = &xb_blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
  13.100 +        bret = RING_GET_RESPONSE(&blk_ring, i);
  13.101  
  13.102  	id = bret->id;
  13.103 -	bp = (struct bio *)xb_rec_ring[id].id;
  13.104 +	bp = (struct bio *)rec_ring[id].id;
  13.105  
  13.106 -	blkif_completion(&xb_rec_ring[id]);
  13.107 +	blkif_completion(&rec_ring[id]);
  13.108  
  13.109  	ADD_ID_TO_FREELIST(id);	/* overwrites req */
  13.110  
  13.111 @@ -277,7 +270,7 @@ xb_response_intr(void *xsc)
  13.112          }
  13.113      }
  13.114      
  13.115 -    xb_resp_cons = i;
  13.116 +    blk_ring.rsp_cons = i;
  13.117  
  13.118      if (sc && xb_kick_pending) {
  13.119      	xb_kick_pending = FALSE;
  13.120 @@ -323,8 +316,6 @@ xb_ioctl(struct disk *dp, u_long cmd, vo
  13.121  {
  13.122      struct xb_softc	*sc = (struct xb_softc *)dp->d_drv1;
  13.123  
  13.124 -    TRACE_ENTER;
  13.125 -	
  13.126      if (sc == NULL)
  13.127  	return (ENXIO);
  13.128  
  13.129 @@ -355,8 +346,8 @@ xb_startio(struct xb_softc *sc)
  13.130      s = splbio();
  13.131  
  13.132      for (bp = bioq_first(&sc->xb_bioq);
  13.133 -         bp && !BLKIF_RING_FULL;
  13.134 -	 xb_req_prod++, queued++, bp = bioq_first(&sc->xb_bioq)) {
  13.135 +         bp && !RING_FULL(&blk_ring);
  13.136 +	 blk_ring.req_prod_pvt++, queued++, bp = bioq_first(&sc->xb_bioq)) {
  13.137  	
  13.138  	/* Check if the buffer is properly aligned */
  13.139  	if ((vm_offset_t)bp->bio_data & PAGE_MASK) {
  13.140 @@ -388,9 +379,10 @@ xb_startio(struct xb_softc *sc)
  13.141  	buffer_ma &= ~PAGE_MASK;
  13.142  
  13.143      	/* Fill out a communications ring structure. */
  13.144 -    	req 		  = &xb_blk_ring->ring[MASK_BLKIF_IDX(xb_req_prod)].req;
  13.145 +    	req 		  = RING_GET_REQUEST(&blk_ring, 
  13.146 +					     blk_ring.req_prod_pvt);
  13.147  	id		  = GET_ID_FROM_FREELIST();
  13.148 -	xb_rec_ring[id].id= (unsigned long)bp;
  13.149 +	rec_ring[id].id= (unsigned long)bp;
  13.150  
  13.151      	req->id 	  = id;
  13.152      	req->operation 	  = (bp->bio_cmd == BIO_READ) ? BLKIF_OP_READ :
  13.153 @@ -409,11 +401,11 @@ xb_startio(struct xb_softc *sc)
  13.154  	req->frame_and_sects[0] = buffer_ma | (fsect << 3) | lsect; 
  13.155  
  13.156  	/* Keep a private copy so we can reissue requests when recovering. */
  13.157 -	translate_req_to_pfn( &xb_rec_ring[id], req);
  13.158 +	translate_req_to_pfn( &rec_ring[id], req);
  13.159  
  13.160      }
  13.161  
  13.162 -    if (BLKIF_RING_FULL)
  13.163 +    if (RING_FULL(&blk_ring))
  13.164  	xb_kick_pending = TRUE;
  13.165      
  13.166      if (queued != 0) 
  13.167 @@ -503,8 +495,6 @@ xb_vbdinit(void)
  13.168      blkif_response_t rsp; 
  13.169      vdisk_t *buf;
  13.170  
  13.171 -    TRACE_ENTER;
  13.172 -
  13.173      buf = (vdisk_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);
  13.174  
  13.175      /* Probe for disk information. */
  13.176 @@ -538,28 +528,31 @@ void
  13.177  blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
  13.178  {
  13.179      unsigned long flags, id;
  13.180 +    blkif_request_t *req_d;
  13.181  
  13.182   retry:
  13.183 -    while ( (xb_req_prod - xb_resp_cons) == BLKIF_RING_SIZE ) {
  13.184 +    while ( RING_FULL(&blk_ring) )
  13.185 +    {
  13.186  	tsleep( req, PWAIT | PCATCH, "blkif", hz);
  13.187      }
  13.188  
  13.189      mtx_lock_irqsave(&blkif_io_lock, flags);
  13.190 -    if ( (xb_req_prod - xb_resp_cons) == BLKIF_RING_SIZE )
  13.191 +    if (  RING_FULL(&blk_ring) )
  13.192      {
  13.193          mtx_unlock_irqrestore(&blkif_io_lock, flags);
  13.194          goto retry;
  13.195      }
  13.196  
  13.197 -    xb_blk_ring->ring[MASK_BLKIF_IDX(xb_req_prod)].req = *req;    
  13.198 +    req_d = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
  13.199 +    *req_d = *req;    
  13.200  
  13.201      id = GET_ID_FROM_FREELIST();
  13.202 -    xb_blk_ring->ring[MASK_BLKIF_IDX(xb_req_prod)].req.id = id;
  13.203 -    xb_rec_ring[id].id = (unsigned long) req;
  13.204 +    req_d->id = id;
  13.205 +    rec_ring[id].id = (unsigned long) req;
  13.206  
  13.207 -    translate_req_to_pfn( &xb_rec_ring[id], req );
  13.208 +    translate_req_to_pfn( &rec_ring[id], req );
  13.209  
  13.210 -    xb_req_prod++;
  13.211 +    blk_ring.req_prod_pvt++;
  13.212      flush_requests();
  13.213  
  13.214      mtx_unlock_irqrestore(&blkif_io_lock, flags);
  13.215 @@ -602,7 +595,7 @@ blkif_send_interface_connect(void)
  13.216      blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
  13.217      
  13.218      msg->handle      = 0;
  13.219 -    msg->shmem_frame = (vtomach(xb_blk_ring) >> PAGE_SHIFT);
  13.220 +    msg->shmem_frame = (vtomach(blk_ring.sring) >> PAGE_SHIFT);
  13.221      
  13.222      ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
  13.223  }
  13.224 @@ -622,9 +615,9 @@ blkif_free(void)
  13.225      mtx_unlock_irqrestore(&blkif_io_lock, flags);
  13.226  
  13.227      /* Free resources associated with old device channel. */
  13.228 -    if (xb_blk_ring) {
  13.229 -        free(xb_blk_ring, M_DEVBUF);
  13.230 -        xb_blk_ring = NULL;
  13.231 +    if (blk_ring.sring != NULL) {
  13.232 +        free(blk_ring.sring, M_DEVBUF);
  13.233 +        blk_ring.sring = NULL;
  13.234      }
  13.235      /* free_irq(blkif_irq, NULL);*/
  13.236      blkif_irq = 0;
  13.237 @@ -642,10 +635,10 @@ blkif_close(void)
  13.238  static void 
  13.239  blkif_disconnect(void)
  13.240  {
  13.241 -    if (xb_blk_ring) free(xb_blk_ring, M_DEVBUF);
  13.242 -    xb_blk_ring = (blkif_ring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);
  13.243 -    xb_blk_ring->req_prod = xb_blk_ring->resp_prod = 0;
  13.244 -    xb_resp_cons = xb_req_prod = 0;
  13.245 +    if (blk_ring.sring) free(blk_ring.sring, M_DEVBUF);
  13.246 +    blk_ring.sring = (blkif_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);
  13.247 +    SHARED_RING_INIT(blk_ring.sring);
  13.248 +    FRONT_RING_INIT(&blk_ring, blk_ring.sring);
  13.249      blkif_state  = BLKIF_STATE_DISCONNECTED;
  13.250      blkif_send_interface_connect();
  13.251  }
  13.252 @@ -663,36 +656,39 @@ blkif_recover(void)
  13.253  {
  13.254  
  13.255      int i;
  13.256 +    blkif_request_t *req;
  13.257  
  13.258      /* Hmm, requests might be re-ordered when we re-issue them.
  13.259       * This will need to be fixed once we have barriers */
  13.260  
  13.261      /* Stage 1 : Find active and move to safety. */
  13.262 -    for ( i = 0; i < BLKIF_RING_SIZE; i++ ) {
  13.263 -        if ( xb_rec_ring[i].id >= KERNBASE ) {
  13.264 -            translate_req_to_mfn(
  13.265 -                &xb_blk_ring->ring[xb_req_prod].req, &xb_rec_ring[i]);
  13.266 -            xb_req_prod++;
  13.267 +    for ( i = 0; i < RING_SIZE(&blk_ring); i++ ) {
  13.268 +        if ( rec_ring[i].id >= KERNBASE ) {
  13.269 +	    req = RING_GET_REQUEST(&blk_ring, 
  13.270 +                                   blk_ring.req_prod_pvt);
  13.271 +	    translate_req_to_mfn(req, &rec_ring[i]);
  13.272 +            blk_ring.req_prod_pvt++;
  13.273          }
  13.274      }
  13.275  
  13.276 -    printk("blkfront: recovered %d descriptors\n",xb_req_prod);
  13.277 +    printk("blkfront: recovered %d descriptors\n",blk_ring.req_prod_pvt);
  13.278  	    
  13.279      /* Stage 2 : Set up shadow list. */
  13.280 -    for ( i = 0; i < xb_req_prod; i++ ) {
  13.281 -        xb_rec_ring[i].id = xb_blk_ring->ring[i].req.id;		
  13.282 -        xb_blk_ring->ring[i].req.id = i;
  13.283 -        translate_req_to_pfn(&xb_rec_ring[i], &xb_blk_ring->ring[i].req);
  13.284 +    for ( i = 0; i < blk_ring.req_prod_pvt; i++ ) {
  13.285 +	req = RING_GET_REQUEST(&blk_ring, i);
  13.286 +	rec_ring[i].id = req->id;
  13.287 +        req->id = i;
  13.288 +        translate_req_to_pfn(&rec_ring[i], req);
  13.289      }
  13.290  
  13.291      /* Stage 3 : Set up free list. */
  13.292 -    for ( ; i < BLKIF_RING_SIZE; i++ ){
  13.293 -        xb_rec_ring[i].id = i+1;
  13.294 +    for ( ; i < RING_SIZE(&blk_ring); i++ ){
  13.295 +        rec_ring[i].id = i+1;
  13.296      }
  13.297 -    xb_rec_ring_free = xb_req_prod;
  13.298 -    xb_rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
  13.299 +    rec_ring_free = blk_ring.req_prod_pvt;
  13.300 +    rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
  13.301  
  13.302 -    /* xb_blk_ring->req_prod will be set when we flush_requests().*/
  13.303 +    /* blk_ring.req_prod will be set when we flush_requests().*/
  13.304      wmb();
  13.305  
  13.306      /* Switch off recovery mode, using a memory barrier to ensure that
  13.307 @@ -877,11 +873,11 @@ xb_init(void *unused)
  13.308  
  13.309      printk("[XEN] Initialising virtual block device driver\n");
  13.310  
  13.311 -    xb_rec_ring_free = 0;
  13.312 -    for (i = 0; i < BLKIF_RING_SIZE; i++) {
  13.313 -	xb_rec_ring[i].id = i+1;
  13.314 +    rec_ring_free = 0;
  13.315 +    for (i = 0; i < RING_SIZE(&blk_ring); i++) {
  13.316 +	rec_ring[i].id = i+1;
  13.317      }
  13.318 -    xb_rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
  13.319 +    rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
  13.320  
  13.321      (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx, 0);
  13.322  
  13.323 @@ -921,5 +917,5 @@ blkif_completion(blkif_request_t *req)
  13.324      }
  13.325      
  13.326  }
  13.327 -MTX_SYSINIT(ioreq, &blkif_io_lock, "BIO LOCK", MTX_SPIN); 
  13.328 +MTX_SYSINIT(ioreq, &blkif_io_lock, "BIO LOCK", MTX_SPIN | MTX_NOWITNESS); /* XXX how does one enroll a lock? */
  13.329  SYSINIT(xbdev, SI_SUB_PSEUDO, SI_ORDER_ANY, xb_init, NULL)
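
Sketch (illustrative, not from the tree): taken together, the blkfront changes above replace the private req_prod/resp_cons index pair with the generic ring macros. A condensed view of the request and response paths as the converted driver drives them; example_submit and example_drain_responses are hypothetical names, locking and error handling omitted:

    /* Produce side: fill the slot at the private producer index, advance it,
     * publish the new producer value and kick the backend. */
    static void example_submit(blkif_request_t *src)
    {
        blkif_request_t *req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
        *req = *src;
        blk_ring.req_prod_pvt++;
        RING_PUSH_REQUESTS(&blk_ring);    /* expose req_prod_pvt as sring->req_prod */
        notify_via_evtchn(blkif_evtchn);
    }

    /* Consume side: walk rsp_cons up to the shared response producer. */
    static void example_drain_responses(void)
    {
        RING_IDX i, rp = blk_ring.sring->rsp_prod;

        rmb();                            /* see all responses up to rp */
        for (i = blk_ring.rsp_cons; i != rp; i++) {
            blkif_response_t *rsp = RING_GET_RESPONSE(&blk_ring, i);
            /* ... complete the request identified by rsp->id ... */
        }
        blk_ring.rsp_cons = i;
    }
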
    14.1 --- a/freebsd-5.3-xen-sparse/i386-xen/xen/misc/evtchn_dev.c	Mon Mar 21 08:22:32 2005 +0000
    14.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/xen/misc/evtchn_dev.c	Mon Mar 21 09:00:17 2005 +0000
    14.3 @@ -46,8 +46,10 @@ static devfs_handle_t xen_dev_dir;
    14.4  static unsigned long evtchn_dev_inuse;
    14.5  
    14.6  /* Notification ring, accessed via /dev/xen/evtchn. */
    14.7 -#define RING_SIZE     2048  /* 2048 16-bit entries */
    14.8 -#define RING_MASK(_i) ((_i)&(RING_SIZE-1))
    14.9 +
   14.10 +#define EVTCHN_RING_SIZE     2048  /* 2048 16-bit entries */
   14.11 +
   14.12 +#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
   14.13  static uint16_t *ring;
   14.14  static unsigned int ring_cons, ring_prod, ring_overflow;
   14.15  
   14.16 @@ -76,8 +78,8 @@ evtchn_device_upcall(int port)
   14.17      clear_evtchn(port);
   14.18  
   14.19      if ( ring != NULL ) {
   14.20 -        if ( (ring_prod - ring_cons) < RING_SIZE ) {
   14.21 -            ring[RING_MASK(ring_prod)] = (uint16_t)port;
   14.22 +        if ( (ring_prod - ring_cons) < EVTCHN_RING_SIZE ) {
   14.23 +            ring[EVTCHN_RING_MASK(ring_prod)] = (uint16_t)port;
   14.24              if ( ring_cons == ring_prod++ ) {
   14.25  		wakeup(evtchn_waddr);
   14.26              }
   14.27 @@ -136,9 +138,9 @@ evtchn_read(struct cdev *dev, struct uio
   14.28      }
   14.29  
   14.30      /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
   14.31 -    if ( ((c ^ p) & RING_SIZE) != 0 ) {
   14.32 -        bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(uint16_t);
   14.33 -        bytes2 = RING_MASK(p) * sizeof(uint16_t);
   14.34 +    if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 ) {
   14.35 +        bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(uint16_t);
   14.36 +        bytes2 = EVTCHN_RING_MASK(p) * sizeof(uint16_t);
   14.37      }
   14.38      else {
   14.39          bytes1 = (p - c) * sizeof(uint16_t);
   14.40 @@ -154,7 +156,7 @@ evtchn_read(struct cdev *dev, struct uio
   14.41          bytes2 = count - bytes1;
   14.42      }
   14.43      
   14.44 -    if ( uiomove(&ring[RING_MASK(c)], bytes1, uio) ||
   14.45 +    if ( uiomove(&ring[EVTCHN_RING_MASK(c)], bytes1, uio) ||
   14.46           ((bytes2 != 0) && uiomove(&ring[0], bytes2, uio)))
   14.47  	  /* keeping this around as its replacement is not equivalent 
   14.48  	   * copyout(&ring[0], &buf[bytes1], bytes2) 
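
Sketch (illustrative, not from the tree): the EVTCHN_RING_* rename above only avoids a clash with the generic ring macros; the logic is unchanged. For clarity, this is how the read path splits a copy at the ring wrap, given free-running consumer/producer indices; split_at_wrap is a hypothetical helper:

    /* Hypothetical helper: compute the two copy chunks for a power-of-two ring
     * with free-running indices c (consumer) and p (producer), p - c <= size. */
    static void split_at_wrap(unsigned int c, unsigned int p,
                              unsigned int *bytes1, unsigned int *bytes2)
    {
        if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
            /* [c, p) crosses the end of the ring: copy up to the end first,
             * then wrap around and copy from slot 0 up to p. */
            *bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(uint16_t);
            *bytes2 = EVTCHN_RING_MASK(p) * sizeof(uint16_t);
        } else {
            *bytes1 = (p - c) * sizeof(uint16_t);
            *bytes2 = 0;
        }
    }
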
    15.1 --- a/freebsd-5.3-xen-sparse/i386-xen/xen/netfront/xn_netfront.c	Mon Mar 21 08:22:32 2005 +0000
    15.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/xen/netfront/xn_netfront.c	Mon Mar 21 09:00:17 2005 +0000
    15.3 @@ -264,7 +264,7 @@ static int
    15.4  netctrl_connected(void)
    15.5  {
    15.6      int ok;
    15.7 -
    15.8 +    XENPRINTF("err %d up %d\n", netctrl.err, netctrl.up);
    15.9      if (netctrl.err)
   15.10  	ok = netctrl.err;
   15.11      else if (netctrl.up == NETIF_DRIVER_STATUS_UP)
   15.12 @@ -424,8 +424,7 @@ xn_alloc_rx_buffers(struct xn_softc *sc)
   15.13  		= INVALID_P2M_ENTRY;
   15.14  	    	
   15.15  	xn_rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
   15.16 -	xn_rx_mcl[i].args[0] = (unsigned long)mtod(m_new,vm_offset_t) 
   15.17 -						>> PAGE_SHIFT;
   15.18 +	xn_rx_mcl[i].args[0] = (unsigned long)mtod(m_new,vm_offset_t);
   15.19  	xn_rx_mcl[i].args[1] = 0;
   15.20  	xn_rx_mcl[i].args[2] = 0;
   15.21  
   15.22 @@ -520,7 +519,7 @@ xn_rxeof(struct xn_softc *sc)
   15.23  	mmu->val = (unsigned long)m->m_ext.ext_args >> PAGE_SHIFT;
   15.24  	mmu++;
   15.25  	mcl->op = __HYPERVISOR_update_va_mapping;
   15.26 -	mcl->args[0] = (unsigned long)m->m_data >> PAGE_SHIFT;
   15.27 +	mcl->args[0] = (unsigned long)m->m_data;
   15.28  	mcl->args[1] = (rx->addr & ~PAGE_MASK) | PG_KERNEL;
   15.29  	mcl->args[2] = 0;
   15.30  	mcl++;
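
Sketch (illustrative, not from the tree): both hunks above drop the >> PAGE_SHIFT, so the first multicall argument to __HYPERVISOR_update_va_mapping is now the linear address itself rather than its page number, presumably tracking the updated hypercall interface; the pte and flags arguments are unchanged. A minimal queued entry under the new convention, reusing the variable names from the surrounding code:

    /* Sketch: queue an update_va_mapping multicall entry. */
    mcl->op      = __HYPERVISOR_update_va_mapping;
    mcl->args[0] = (unsigned long)m->m_data;             /* linear address */
    mcl->args[1] = (rx->addr & ~PAGE_MASK) | PG_KERNEL;  /* new pte value  */
    mcl->args[2] = 0;                                     /* no flush flags */
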
   15.31 @@ -1303,7 +1302,6 @@ netif_driver_status(netif_fe_driver_stat
   15.32  static void 
   15.33  netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
   15.34  {
   15.35 -
   15.36      switch ( msg->subtype )
   15.37      {
   15.38      case CMSG_NETIF_FE_INTERFACE_STATUS:
   15.39 @@ -1326,7 +1324,7 @@ netif_ctrlif_rx(ctrl_msg_t *msg, unsigne
   15.40          break;
   15.41      }
   15.42  
   15.43 -    ctrl_if_send_response(msg);
   15.44 +    ctrl_if_send_response(msg);   
   15.45  }
   15.46  
   15.47  #if 1
   15.48 @@ -1338,7 +1336,6 @@ static int probe_interfaces(void)
   15.49  {
   15.50      int err = 0, conn = 0;
   15.51      int wait_i, wait_n = 100;
   15.52 -
   15.53      for ( wait_i = 0; wait_i < wait_n; wait_i++)
   15.54      { 
   15.55          XENPRINTF("> wait_i=%d\n", wait_i);
   15.56 @@ -1421,7 +1418,7 @@ xn_init(void *unused)
   15.57  {
   15.58      
   15.59      int err = 0;
   15.60 -
   15.61 +    
   15.62      netctrl_init();
   15.63      (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
   15.64  				    CALLBACK_IN_BLOCKING_CONTEXT);