ia64/xen-unstable

changeset 1209:1955d4f31629

bitkeeper revision 1.820 (40600453bCkH6oPCubNeqIe3OBUXGQ)

io.h:
new file
Many files:
Further IO virtualisation patches.
author kaf24@scramble.cl.cam.ac.uk
date Tue Mar 23 09:33:07 2004 +0000 (2004-03-23)
parents bf992264fed2
children 9f85adafc1e1
files .rootkeys xen/common/domain.c xen/common/physdev.c xen/include/hypervisor-ifs/network.h xen/include/xeno/vif.h xen/net/dev.c xenolinux-2.4.25-sparse/arch/xeno/config.in xenolinux-2.4.25-sparse/arch/xeno/drivers/network/network.c xenolinux-2.4.25-sparse/arch/xeno/drivers/vnetif/vnetif.c xenolinux-2.4.25-sparse/arch/xeno/kernel/hypervisor.c xenolinux-2.4.25-sparse/arch/xeno/kernel/i386_ksyms.c xenolinux-2.4.25-sparse/arch/xeno/kernel/physirq.c xenolinux-2.4.25-sparse/include/asm-xeno/io.h xenolinux-2.4.25-sparse/mkbuildtree
line diff
     1.1 --- a/.rootkeys	Tue Mar 23 08:30:15 2004 +0000
     1.2 +++ b/.rootkeys	Tue Mar 23 09:33:07 2004 +0000
     1.3 @@ -666,6 +666,7 @@ 3e5a4e66SYp_UpAVcF8Lc1wa3Qtgzw xenolinux
     1.4  3e5a4e67w_DWgjIJ17Tlossu1LGujQ xenolinux-2.4.25-sparse/include/asm-xeno/highmem.h
     1.5  3e5a4e67YtcyDLQsShhCfQwPSELfvA xenolinux-2.4.25-sparse/include/asm-xeno/hw_irq.h
     1.6  3e5a4e677VBavzM1UZIEcH1B-RlXMA xenolinux-2.4.25-sparse/include/asm-xeno/hypervisor.h
     1.7 +4060044fVx7-tokvNLKBf_6qBB4lqQ xenolinux-2.4.25-sparse/include/asm-xeno/io.h
     1.8  3e5a4e673p7PEOyHFm3nHkYX6HQYBg xenolinux-2.4.25-sparse/include/asm-xeno/irq.h
     1.9  3ead095db_LRUXnxaqs0dA1DWhPoQQ xenolinux-2.4.25-sparse/include/asm-xeno/keyboard.h
    1.10  3e5a4e678ddsQOpbSiRdy1GRcDc9WA xenolinux-2.4.25-sparse/include/asm-xeno/mmu_context.h
     2.1 --- a/xen/common/domain.c	Tue Mar 23 08:30:15 2004 +0000
     2.2 +++ b/xen/common/domain.c	Tue Mar 23 09:33:07 2004 +0000
     2.3 @@ -150,7 +150,7 @@ void __kill_domain(struct task_struct *p
     2.4      if ( !sched_rem_domain(p) )
     2.5          return;
     2.6  
     2.7 -    printk("Killing domain %llu\n", p->domain);
     2.8 +    DPRINTK("Killing domain %llu\n", p->domain);
     2.9  
    2.10      unlink_blkdev_info(p);
    2.11  
    2.12 @@ -482,7 +482,7 @@ void release_task(struct task_struct *p)
    2.13      ASSERT(p->state == TASK_DYING);
    2.14      ASSERT(!p->has_cpu);
    2.15  
    2.16 -    printk("Releasing task %llu\n", p->domain);
    2.17 +    DPRINTK("Releasing task %llu\n", p->domain);
    2.18  
    2.19      /*
    2.20       * This frees up blkdev rings and vbd-access lists. Totally safe since
     3.1 --- a/xen/common/physdev.c	Tue Mar 23 08:30:15 2004 +0000
     3.2 +++ b/xen/common/physdev.c	Tue Mar 23 09:33:07 2004 +0000
     3.3 @@ -24,7 +24,15 @@
      3.4   * size of the region, is faked out by a very simple state machine, 
     3.5   * preventing direct writes to the PCI config registers by a guest.
     3.6   *
     3.7 - * XXX Some comment on IRQ handling
      3.8 + * Interrupt handling is currently done in a rather cheesy fashion.
      3.9 + * We take the default IRQ controller code and replace it with our own.
     3.10 + * When an interrupt comes in it is acked using the PIC's normal routine,
     3.11 + * and an event is sent to the receiving domain, which must explicitly
     3.12 + * signal completion (via PHYSDEVOP_FINISHED_IRQ) before the PIC's end
     3.13 + * handler is called. Very cheesy, with all sorts of problems, but it
     3.14 + * seems to work in normal cases. No shared interrupts are allowed.
    3.15 + *
    3.16 + * XXX this code is not SMP safe at the moment!
    3.17   */
    3.18  
    3.19  
    3.20 @@ -77,6 +85,12 @@ typedef struct _phys_dev_st
     3.21  /* an array of device descriptors indexed by IRQ number */
    3.22  static phys_dev_t *irqs[MAX_IRQS];
    3.23  
    3.24 +/*
    3.25 + * 
    3.26 + * General functions
    3.27 + * 
    3.28 + */
    3.29 +
    3.30  /* find a device on the device list */
    3.31  static phys_dev_t *find_pdev(struct task_struct *p, struct pci_dev *dev)
    3.32  {
    3.33 @@ -237,6 +251,11 @@ inline static int check_dev_acc (struct 
    3.34      return 0;
    3.35  }
    3.36  
    3.37 +/*
    3.38 + * 
    3.39 + * PCI config space access
    3.40 + * 
    3.41 + */
    3.42  
    3.43  /*
    3.44   * Base address registers contain the base address for IO regions.
    3.45 @@ -313,6 +332,7 @@ static int do_base_address_access(phys_d
    3.46  
    3.47              if ( res->flags & IORESOURCE_MEM )
    3.48              {
    3.49 +                /* this is written out explicitly for clarity */
    3.50                  *val = 0xffffffff;
    3.51                  /* bit    0 = 0 */
    3.52                  /* bit  21  = memory type */
    3.53 @@ -501,6 +521,13 @@ static long pci_cfgreg_write(int seg, in
    3.54                                        func, reg, len, &val);
    3.55          return ret;
    3.56          break;        
    3.57 +#if 0
    3.58 +    case 0xe0: /* XXX some device drivers seem to write to this.... */
    3.59 +        printk("pci write hack allowed %02x:%02x:%02x: "
    3.60 +                   "reg=0x%02x len=0x%02x val=0x%08x\n",
    3.61 +                   bus, dev, func, reg, len, val);
    3.62 +        break;        
    3.63 +#endif
    3.64      default:
    3.65          //if ( pdev->flags != ACC_WRITE ) 
    3.66          /* XXX for debug we disallow all write access */
    3.67 @@ -520,6 +547,12 @@ static long pci_cfgreg_write(int seg, in
    3.68      return ret;
    3.69  }
    3.70  
    3.71 +/*
    3.72 + * 
    3.73 + * Interrupt handling
    3.74 + * 
    3.75 + */
    3.76 +
    3.77  
    3.78  /*
    3.79   * return the IRQ xen assigned to the device.
    3.80 @@ -552,26 +585,23 @@ static void phys_dev_interrupt(int irq, 
    3.81          return;
    3.82      }
    3.83      
    3.84 -    //printk("irq %d pdev=%p\n", irq, pdev);
    3.85 -
    3.86      p = pdev->owner;
    3.87  
    3.88 -    //printk("owner %p\n", p);
    3.89 -
    3.90      if ( test_bit(irq, &p->shared_info->physirq_pend) )
    3.91      {
    3.92 -        printk("irq %d already delivered to guest\n", irq);
     3.93 +        /* This IRQ was already delivered to the guest. */
    3.94          return;
    3.95      }
    3.96 +
    3.97      /* notify guest */
    3.98      set_bit(irq, &p->shared_info->physirq_pend);
    3.99      set_bit(ST_IRQ_DELIVERED, &pdev->state);
   3.100 -    cpu_mask |= mark_guest_event(p, _EVENT_TIMER);
   3.101 +    cpu_mask |= mark_guest_event(p, _EVENT_PHYSIRQ);
   3.102      guest_event_notify(cpu_mask);
   3.103  }
   3.104  
    3.105  /* this is called instead of the PIC's original end handler. 
    3.106 - * the real end handler is only called once the guest ack'ed the handling
    3.107 + * the real end handler is only called once the guest has signalled the handling
   3.108   * of the event. */
   3.109  static void end_virt_irq (unsigned int i)
   3.110  {
   3.111 @@ -610,8 +640,6 @@ static long pci_request_irq(int irq)
   3.112          return -EINVAL;
   3.113      }
   3.114  
   3.115 -    printk("pdev= %p\n", pdev);
   3.116 -
   3.117      if ( irq >= MAX_IRQS )
   3.118      {
    3.119          printk("requested IRQ too big %d\n", irq);
   3.120 @@ -651,8 +679,9 @@ static long pci_request_irq(int irq)
   3.121      
   3.122      printk ("setup handler %d\n", irq);
   3.123  
   3.124 -    /* request the IRQ. this is not shared! */
   3.125 -    err = request_irq(irq, phys_dev_interrupt, 0, "network", (void *)pdev);
   3.126 +    /* request the IRQ. this is not shared and we use a slow handler! */
   3.127 +    err = request_irq(irq, phys_dev_interrupt, SA_INTERRUPT,
   3.128 +                      "foo", (void *)pdev);
   3.129      if ( err )
   3.130      {
   3.131          printk("error requesting irq\n");
   3.132 @@ -670,7 +699,35 @@ static long pci_request_irq(int irq)
   3.133  
   3.134  static long pci_free_irq(int irq)
   3.135  {
   3.136 -    /* XXX restore original handler and free_irq() */
   3.137 +    phys_dev_t *pdev;
   3.138 +
   3.139 +    if ( irq >= MAX_IRQS )
   3.140 +    {
    3.141 +        printk("requested IRQ too big %d\n", irq);
   3.142 +        return -EINVAL;
   3.143 +    }
   3.144 +
   3.145 +    if ( irqs[irq] == NULL )
   3.146 +    {
    3.147 +        printk("irq %d not in use\n", irq);
   3.148 +        return -EINVAL;
   3.149 +    }
   3.150 +
   3.151 +    pdev = irqs[irq];
   3.152 +
   3.153 +    /* shutdown IRQ */
   3.154 +    free_irq(irq, (void *)pdev);
   3.155 +
   3.156 +    /* restore irq controller  */
   3.157 +    irq_desc[irq].handler = pdev->orig_handler;
   3.158 +
   3.159 +    /* clean up */
   3.160 +    pdev->orig_handler = NULL;
   3.161 +    irqs[irq] = NULL;
   3.162 +    kfree(pdev->new_handler);
   3.163 +    pdev->new_handler = NULL;
   3.164 +
    3.165 +    printk("freed irq %d\n", irq);
   3.166      return 0;
   3.167  }
   3.168  
   3.169 @@ -724,6 +781,7 @@ static long pci_finished_irq(int irq)
   3.170      return 0;
   3.171  }
   3.172  
   3.173 +
   3.174  /*
   3.175   * demux hypervisor call.
   3.176   */
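
To make the interrupt round-trip described in the new header comment concrete, the flow can be restated in plain C. This is a simplified sketch built from the names visible in this patch (physirq_pend, ST_IRQ_DELIVERED, _EVENT_PHYSIRQ, orig_handler, PHYSDEVOP_FINISHED_IRQ), not a verbatim copy of the handlers:

    /* Xen, hardware IRQ arrives: PIC has acked; mark pending, kick guest. */
    static void phys_dev_interrupt_sketch(int irq, phys_dev_t *pdev)
    {
        struct task_struct *p = pdev->owner;

        if ( test_bit(irq, &p->shared_info->physirq_pend) )
            return;                        /* already pending in the guest */
        set_bit(irq, &p->shared_info->physirq_pend);
        set_bit(ST_IRQ_DELIVERED, &pdev->state);   /* PIC end handler owed */
        guest_event_notify(mark_guest_event(p, _EVENT_PHYSIRQ));
    }

    /* Xen, guest issues PHYSDEVOP_FINISHED_IRQ: only now run the PIC's
     * original end handler so the line can fire again. A sketch of what
     * pci_finished_irq must do, under the assumptions above. */
    static void pci_finished_irq_sketch(int irq, phys_dev_t *pdev)
    {
        if ( test_and_clear_bit(ST_IRQ_DELIVERED, &pdev->state) )
            pdev->orig_handler->end(irq);
    }

Because the PIC end handler is deferred until the guest's hypercall, the IRQ is requested non-shared with SA_INTERRUPT, and shared interrupt lines cannot be supported.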
     4.1 --- a/xen/include/hypervisor-ifs/network.h	Tue Mar 23 08:30:15 2004 +0000
     4.2 +++ b/xen/include/hypervisor-ifs/network.h	Tue Mar 23 09:33:07 2004 +0000
     4.3 @@ -73,16 +73,16 @@ typedef union rx_entry_st
     4.4  } rx_entry_t;
     4.5  
     4.6  
     4.7 -#define TX_RING_SIZE 256
     4.8 -#define RX_RING_SIZE 256
     4.9 +#define XENNET_TX_RING_SIZE 256
    4.10 +#define XENNET_RX_RING_SIZE 256
    4.11  
    4.12  #define MAX_DOMAIN_VIFS 8
    4.13  
    4.14  /* This structure must fit in a memory page. */
    4.15  typedef struct net_ring_st
    4.16  {
    4.17 -    tx_entry_t tx_ring[TX_RING_SIZE];
    4.18 -    rx_entry_t rx_ring[RX_RING_SIZE];
    4.19 +    tx_entry_t tx_ring[XENNET_TX_RING_SIZE];
    4.20 +    rx_entry_t rx_ring[XENNET_RX_RING_SIZE];
    4.21  } net_ring_t;
    4.22  
    4.23  /*
    4.24 @@ -96,8 +96,8 @@ typedef unsigned int NET_RING_IDX;
    4.25   * size of the ring buffer. The following macros convert a free-running counter
    4.26   * into a value that can directly index a ring-buffer array.
    4.27   */
    4.28 -#define MASK_NET_RX_IDX(_i) ((_i)&(RX_RING_SIZE-1))
    4.29 -#define MASK_NET_TX_IDX(_i) ((_i)&(TX_RING_SIZE-1))
    4.30 +#define MASK_NET_RX_IDX(_i) ((_i)&(XENNET_RX_RING_SIZE-1))
    4.31 +#define MASK_NET_TX_IDX(_i) ((_i)&(XENNET_TX_RING_SIZE-1))
    4.32  
    4.33  typedef struct net_idx_st
    4.34  {
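
The TX/RX ring constants gain a XENNET_ prefix, presumably to avoid clashes with ring-size macros defined privately by the physical NIC drivers now being built into the guest kernel. The free-running-index scheme itself is unchanged; a small illustration of why the sizes must remain powers of two:

    /* Indices run freely and are only reduced on array access, so
     * (prod - cons) is always the number of occupied slots, even across
     * 32-bit counter wraparound. The mask only works for power-of-two
     * ring sizes. Values here are illustrative. */
    NET_RING_IDX prod = 260, cons = 4;     /* 256 slots in use: ring full */
    tx_entry_t *e = &ring->tx_ring[MASK_NET_TX_IDX(prod)];  /* slot 4    */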
     5.1 --- a/xen/include/xeno/vif.h	Tue Mar 23 08:30:15 2004 +0000
     5.2 +++ b/xen/include/xeno/vif.h	Tue Mar 23 09:33:07 2004 +0000
     5.3 @@ -24,11 +24,12 @@
     5.4  
     5.5  extern struct net_device *the_dev;
     5.6  
     5.7 -/* 
     5.8 - * shadow ring structures are used to protect the descriptors from
     5.9 - * tampering after they have been passed to the hypervisor.
    5.10 - *
    5.11 - * TX_RING_SIZE and RX_RING_SIZE are defined in the shared network.h.
    5.12 +/*
    5.13 + * shadow ring structures are used to protect the descriptors from tampering 
    5.14 + * after they have been passed to the hypervisor.
    5.15 + * 
    5.16 + * XENNET_TX_RING_SIZE and XENNET_RX_RING_SIZE are defined in the shared
    5.17 + * network.h. 
    5.18   */
    5.19  
    5.20  typedef struct rx_shadow_entry_st 
    5.21 @@ -53,10 +54,10 @@ typedef struct net_vif_st {
    5.22      net_idx_t          *shared_idxs;
    5.23  
    5.24      /* The private rings and indexes. */
    5.25 -    rx_shadow_entry_t rx_shadow_ring[RX_RING_SIZE];
    5.26 +    rx_shadow_entry_t rx_shadow_ring[XENNET_RX_RING_SIZE];
    5.27      NET_RING_IDX rx_prod;  /* More buffers for filling go here. */
    5.28      NET_RING_IDX rx_cons;  /* Next buffer to fill is here. */
    5.29 -    tx_shadow_entry_t tx_shadow_ring[TX_RING_SIZE];
    5.30 +    tx_shadow_entry_t tx_shadow_ring[XENNET_TX_RING_SIZE];
    5.31      NET_RING_IDX tx_prod;  /* More packets for sending go here. */
    5.32      NET_RING_IDX tx_cons;  /* Next packet to send is here. */
    5.33  
     6.1 --- a/xen/net/dev.c	Tue Mar 23 08:30:15 2004 +0000
     6.2 +++ b/xen/net/dev.c	Tue Mar 23 09:33:07 2004 +0000
     6.3 @@ -1869,7 +1869,7 @@ static int get_tx_bufs(net_vif_t *vif)
     6.4   again:
     6.5      for ( i = vif->tx_req_cons; 
     6.6            (i != shared_idxs->tx_req_prod) && 
     6.7 -              ((i-vif->tx_resp_prod) != TX_RING_SIZE);
     6.8 +              ((i-vif->tx_resp_prod) != XENNET_TX_RING_SIZE);
     6.9            i++ )
    6.10      {
    6.11          tx     = shared_rings->tx_ring[MASK_NET_TX_IDX(i)].req;
    6.12 @@ -2061,7 +2061,7 @@ static void get_rx_bufs(net_vif_t *vif)
    6.13      j = vif->rx_prod;
    6.14      for ( i = vif->rx_req_cons; 
    6.15            (i != shared_idxs->rx_req_prod) && 
    6.16 -              ((i-vif->rx_resp_prod) != RX_RING_SIZE);
    6.17 +              ((i-vif->rx_resp_prod) != XENNET_RX_RING_SIZE);
    6.18            i++ )
    6.19      {
    6.20          rx = shared_rings->rx_ring[MASK_NET_RX_IDX(i)].req;
    6.21 @@ -2193,7 +2193,7 @@ long flush_bufs_for_vif(net_vif_t *vif)
    6.22      spin_lock(&vif->rx_lock);
    6.23      for ( i = vif->rx_req_cons; 
    6.24            (i != shared_idxs->rx_req_prod) &&
    6.25 -              ((i-vif->rx_resp_prod) != RX_RING_SIZE);
    6.26 +              ((i-vif->rx_resp_prod) != XENNET_RX_RING_SIZE);
    6.27            i++ )
    6.28      {
    6.29          make_rx_response(vif, shared_rings->rx_ring[MASK_NET_RX_IDX(i)].req.id,
    6.30 @@ -2242,7 +2242,7 @@ long flush_bufs_for_vif(net_vif_t *vif)
    6.31      spin_lock(&vif->tx_lock);
    6.32      for ( i = vif->tx_req_cons; 
    6.33            (i != shared_idxs->tx_req_prod) &&
    6.34 -              ((i-vif->tx_resp_prod) != TX_RING_SIZE);
    6.35 +              ((i-vif->tx_resp_prod) != XENNET_TX_RING_SIZE);
    6.36            i++ )
    6.37      {
    6.38          make_tx_response(vif, shared_rings->tx_ring[MASK_NET_TX_IDX(i)].req.id,
     7.1 --- a/xenolinux-2.4.25-sparse/arch/xeno/config.in	Tue Mar 23 08:30:15 2004 +0000
     7.2 +++ b/xenolinux-2.4.25-sparse/arch/xeno/config.in	Tue Mar 23 09:33:07 2004 +0000
     7.3 @@ -107,6 +107,10 @@ bool 'Networking support' CONFIG_NET
     7.4  bool 'PCI support' CONFIG_PCI
     7.5  if [ "$CONFIG_PCI" = "y" ]; then
     7.6     tristate '    3c590/3c900 series (592/595/597) "Vortex/Boomerang" support' CONFIG_VORTEX
     7.7 +   tristate 'Intel(R) PRO/1000 Gigabit Ethernet support' CONFIG_E1000
     7.8 +   if [ "$CONFIG_E1000" != "n" ]; then
     7.9 +      bool '  Use Rx Polling (NAPI)' CONFIG_E1000_NAPI
    7.10 +   fi
    7.11  fi
    7.12  source drivers/pci/Config.in
    7.13  
     8.1 --- a/xenolinux-2.4.25-sparse/arch/xeno/drivers/network/network.c	Tue Mar 23 08:30:15 2004 +0000
     8.2 +++ b/xenolinux-2.4.25-sparse/arch/xeno/drivers/network/network.c	Tue Mar 23 09:33:07 2004 +0000
     8.3 @@ -58,8 +58,8 @@ struct net_private
     8.4       * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
     8.5       * array is an index into a chain of free entries.
     8.6       */
     8.7 -    struct sk_buff *tx_skbs[TX_RING_SIZE+1];
     8.8 -    struct sk_buff *rx_skbs[RX_RING_SIZE+1];
     8.9 +    struct sk_buff *tx_skbs[XENNET_TX_RING_SIZE+1];
    8.10 +    struct sk_buff *rx_skbs[XENNET_RX_RING_SIZE+1];
    8.11  };
    8.12  
    8.13  /* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
    8.14 @@ -143,9 +143,9 @@ static int network_open(struct net_devic
    8.15      memset(np->net_idx, 0, sizeof(*np->net_idx));
    8.16  
    8.17      /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
    8.18 -    for ( i = 0; i <= TX_RING_SIZE; i++ )
    8.19 +    for ( i = 0; i <= XENNET_TX_RING_SIZE; i++ )
    8.20          np->tx_skbs[i] = (void *)(i+1);
    8.21 -    for ( i = 0; i <= RX_RING_SIZE; i++ )
    8.22 +    for ( i = 0; i <= XENNET_RX_RING_SIZE; i++ )
    8.23          np->rx_skbs[i] = (void *)(i+1);
    8.24  
    8.25      wmb();
    8.26 @@ -196,7 +196,8 @@ static void network_tx_buf_gc(struct net
    8.27      }
    8.28      while ( prod != np->net_idx->tx_resp_prod );
    8.29  
    8.30 -    if ( np->tx_full && ((np->net_idx->tx_req_prod - prod) < TX_RING_SIZE) )
    8.31 +    if ( np->tx_full && 
    8.32 +         ((np->net_idx->tx_req_prod - prod) < XENNET_TX_RING_SIZE) )
    8.33      {
    8.34          np->tx_full = 0;
    8.35          if ( np->state == STATE_ACTIVE )
    8.36 @@ -223,7 +224,7 @@ static void network_alloc_rx_buffers(str
    8.37      netop_t netop;
    8.38      NET_RING_IDX i = np->net_idx->rx_req_prod;
    8.39  
    8.40 -    if ( unlikely((i - np->rx_resp_cons) == RX_RING_SIZE) || 
    8.41 +    if ( unlikely((i - np->rx_resp_cons) == XENNET_RX_RING_SIZE) || 
    8.42           unlikely(np->state != STATE_ACTIVE) )
    8.43          return;
    8.44  
    8.45 @@ -246,7 +247,7 @@ static void network_alloc_rx_buffers(str
    8.46  
    8.47          np->rx_bufs_to_notify++;
    8.48      }
    8.49 -    while ( (++i - np->rx_resp_cons) != RX_RING_SIZE );
    8.50 +    while ( (++i - np->rx_resp_cons) != XENNET_RX_RING_SIZE );
    8.51  
    8.52      /*
    8.53       * We may have allocated buffers which have entries outstanding in the page
    8.54 @@ -258,7 +259,7 @@ static void network_alloc_rx_buffers(str
    8.55      np->net_idx->rx_event    = np->rx_resp_cons + 1;
    8.56          
    8.57      /* Batch Xen notifications. */
    8.58 -    if ( np->rx_bufs_to_notify > (RX_RING_SIZE/4) )
    8.59 +    if ( np->rx_bufs_to_notify > (XENNET_RX_RING_SIZE/4) )
    8.60      {
    8.61          netop.cmd = NETOP_PUSH_BUFFERS;
    8.62          netop.vif = np->idx;
    8.63 @@ -313,7 +314,7 @@ static int network_start_xmit(struct sk_
    8.64  
    8.65      network_tx_buf_gc(dev);
    8.66  
    8.67 -    if ( (i - np->tx_resp_cons) == (TX_RING_SIZE - 1) )
    8.68 +    if ( (i - np->tx_resp_cons) == (XENNET_TX_RING_SIZE - 1) )
    8.69      {
    8.70          np->tx_full = 1;
    8.71          netif_stop_queue(dev);
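
The {tx,rx}_skbs arrays are one entry larger than the ring because, as the comment above notes, entry 0 is the head of a free chain threaded through the unused slots (the initialisation loop stores i+1 into entry i). The access macros referenced above are not shown in this hunk; hypothetical equivalents would look like:

    /* Hypothetical GET/ADD macros: allocate and release ring ids in O(1)
     * by threading a free list through the unused pointer slots. */
    #define GET_ID_FROM_FREELIST(_list)                    \
        ({ unsigned long _id = (unsigned long)(_list)[0];  \
           (_list)[0] = (_list)[_id];                      \
           _id; })
    #define ADD_ID_TO_FREELIST(_list, _id)                 \
        do {                                               \
            (_list)[_id] = (_list)[0];                     \
            (_list)[0]   = (void *)(unsigned long)(_id);   \
        } while (0)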
     9.1 --- a/xenolinux-2.4.25-sparse/arch/xeno/drivers/vnetif/vnetif.c	Tue Mar 23 08:30:15 2004 +0000
     9.2 +++ b/xenolinux-2.4.25-sparse/arch/xeno/drivers/vnetif/vnetif.c	Tue Mar 23 09:33:07 2004 +0000
     9.3 @@ -58,8 +58,8 @@ struct net_private
     9.4       * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
     9.5       * array is an index into a chain of free entries.
     9.6       */
     9.7 -    struct sk_buff *tx_skbs[TX_RING_SIZE+1];
     9.8 -    struct sk_buff *rx_skbs[RX_RING_SIZE+1];
     9.9 +    struct sk_buff *tx_skbs[XENNET_TX_RING_SIZE+1];
    9.10 +    struct sk_buff *rx_skbs[XENNET_RX_RING_SIZE+1];
    9.11  };
    9.12  
    9.13  /* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
    9.14 @@ -143,9 +143,9 @@ static int network_open(struct net_devic
    9.15      memset(np->net_idx, 0, sizeof(*np->net_idx));
    9.16  
    9.17      /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
    9.18 -    for ( i = 0; i <= TX_RING_SIZE; i++ )
    9.19 +    for ( i = 0; i <= XENNET_TX_RING_SIZE; i++ )
    9.20          np->tx_skbs[i] = (void *)(i+1);
    9.21 -    for ( i = 0; i <= RX_RING_SIZE; i++ )
    9.22 +    for ( i = 0; i <= XENNET_RX_RING_SIZE; i++ )
    9.23          np->rx_skbs[i] = (void *)(i+1);
    9.24  
    9.25      wmb();
    9.26 @@ -196,7 +196,8 @@ static void network_tx_buf_gc(struct net
    9.27      }
    9.28      while ( prod != np->net_idx->tx_resp_prod );
    9.29  
    9.30 -    if ( np->tx_full && ((np->net_idx->tx_req_prod - prod) < TX_RING_SIZE) )
    9.31 +    if ( np->tx_full && 
    9.32 +         ((np->net_idx->tx_req_prod - prod) < XENNET_TX_RING_SIZE) )
    9.33      {
    9.34          np->tx_full = 0;
    9.35          if ( np->state == STATE_ACTIVE )
    9.36 @@ -223,7 +224,7 @@ static void network_alloc_rx_buffers(str
    9.37      netop_t netop;
    9.38      NET_RING_IDX i = np->net_idx->rx_req_prod;
    9.39  
    9.40 -    if ( unlikely((i - np->rx_resp_cons) == RX_RING_SIZE) || 
    9.41 +    if ( unlikely((i - np->rx_resp_cons) == XENNET_RX_RING_SIZE) || 
    9.42           unlikely(np->state != STATE_ACTIVE) )
    9.43          return;
    9.44  
    9.45 @@ -246,7 +247,7 @@ static void network_alloc_rx_buffers(str
    9.46  
    9.47          np->rx_bufs_to_notify++;
    9.48      }
    9.49 -    while ( (++i - np->rx_resp_cons) != RX_RING_SIZE );
    9.50 +    while ( (++i - np->rx_resp_cons) != XENNET_RX_RING_SIZE );
    9.51  
    9.52      /*
    9.53       * We may have allocated buffers which have entries outstanding in the page
    9.54 @@ -258,7 +259,7 @@ static void network_alloc_rx_buffers(str
    9.55      np->net_idx->rx_event    = np->rx_resp_cons + 1;
    9.56          
    9.57      /* Batch Xen notifications. */
    9.58 -    if ( np->rx_bufs_to_notify > (RX_RING_SIZE/4) )
    9.59 +    if ( np->rx_bufs_to_notify > (XENNET_RX_RING_SIZE/4) )
    9.60      {
    9.61          netop.cmd = NETOP_PUSH_BUFFERS;
    9.62          netop.vif = np->idx;
    9.63 @@ -313,7 +314,7 @@ static int network_start_xmit(struct sk_
    9.64  
    9.65      network_tx_buf_gc(dev);
    9.66  
    9.67 -    if ( (i - np->tx_resp_cons) == (TX_RING_SIZE - 1) )
    9.68 +    if ( (i - np->tx_resp_cons) == (XENNET_TX_RING_SIZE - 1) )
    9.69      {
    9.70          np->tx_full = 1;
    9.71          netif_stop_queue(dev);
    10.1 --- a/xenolinux-2.4.25-sparse/arch/xeno/kernel/hypervisor.c	Tue Mar 23 08:30:15 2004 +0000
    10.2 +++ b/xenolinux-2.4.25-sparse/arch/xeno/kernel/hypervisor.c	Tue Mar 23 09:33:07 2004 +0000
    10.3 @@ -7,8 +7,9 @@
    10.4   */
    10.5  
    10.6  #include <linux/config.h>
    10.7 +#include <linux/irq.h>
    10.8 +#include <linux/kernel_stat.h>
    10.9  #include <asm/atomic.h>
   10.10 -#include <linux/irq.h>
   10.11  #include <asm/hypervisor.h>
   10.12  #include <asm/system.h>
   10.13  #include <asm/ptrace.h>
   10.14 @@ -18,6 +19,40 @@ int nr_multicall_ents = 0;
   10.15  
   10.16  static unsigned long event_mask = 0;
   10.17  
   10.18 +asmlinkage unsigned int do_physirq(int irq, struct pt_regs *regs)
   10.19 +{
   10.20 +    int cpu = smp_processor_id();
   10.21 +    unsigned long irqs;
   10.22 +    shared_info_t *shared = HYPERVISOR_shared_info;
   10.23 +
   10.24 +    /* do this manually */
   10.25 +    kstat.irqs[cpu][irq]++;
   10.26 +    ack_hypervisor_event(irq);
   10.27 +
   10.28 +    barrier();
   10.29 +    irqs  = xchg(&shared->physirq_pend, 0);
   10.30 +
   10.31 +    __asm__ __volatile__ (
   10.32 +        "   push %1                            ;"
   10.33 +        "   sub  $4,%%esp                      ;"
   10.34 +        "   jmp  3f                            ;"
   10.35 +        "1: btrl %%eax,%0                      ;" /* clear bit     */
   10.36 +        "   mov  %%eax,(%%esp)                 ;"
   10.37 +        "   call do_IRQ                        ;" /* do_IRQ(event) */
   10.38 +        "3: bsfl %0,%%eax                      ;" /* %eax == bit # */
   10.39 +        "   jnz  1b                            ;"
   10.40 +        "   add  $8,%%esp                      ;"
   10.41 +        /* we use %ebx because it is callee-saved */
   10.42 +        : : "b" (irqs), "r" (regs)
   10.43 +        /* clobbered by callback function calls */
   10.44 +        : "eax", "ecx", "edx", "memory" ); 
   10.45 +
   10.46 +    /* do this manually */
   10.47 +    end_hypervisor_event(irq);
   10.48 +
   10.49 +    return 0;
   10.50 +}
   10.51 +
   10.52  void do_hypervisor_callback(struct pt_regs *regs)
   10.53  {
   10.54      unsigned long events, flags;
   10.55 @@ -32,6 +67,12 @@ void do_hypervisor_callback(struct pt_re
   10.56          events  = xchg(&shared->events, 0);
   10.57          events &= event_mask;
   10.58  
   10.59 +        if ( (events & EVENT_PHYSIRQ) != 0 )
   10.60 +        {
   10.61 +            do_physirq(_EVENT_PHYSIRQ, regs);
   10.62 +            events &= ~EVENT_PHYSIRQ;
   10.63 +        }
   10.64 +
   10.65          __asm__ __volatile__ (
   10.66              "   push %1                            ;"
   10.67              "   sub  $4,%%esp                      ;"
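
The inline assembly in do_physirq reuses the dispatch idiom already present in do_hypervisor_callback: snapshot the pending mask with xchg(), then bsfl/btrl through the set bits, calling do_IRQ(irq, regs) for each. A behavioural C equivalent of the loop (the assembly exists mainly to keep the mask in the callee-saved %ebx across the do_IRQ calls):

    unsigned long irqs = xchg(&shared->physirq_pend, 0);
    int bit;

    while ( (bit = ffs(irqs)) != 0 )   /* lowest set bit, 1-based */
    {
        bit -= 1;
        clear_bit(bit, &irqs);         /* the btrl instruction    */
        do_IRQ(bit, regs);             /* dispatch one phys IRQ   */
    }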
    11.1 --- a/xenolinux-2.4.25-sparse/arch/xeno/kernel/i386_ksyms.c	Tue Mar 23 08:30:15 2004 +0000
    11.2 +++ b/xenolinux-2.4.25-sparse/arch/xeno/kernel/i386_ksyms.c	Tue Mar 23 09:33:07 2004 +0000
    11.3 @@ -9,7 +9,7 @@
    11.4  #include <linux/interrupt.h>
    11.5  #include <linux/smp_lock.h>
    11.6  #include <linux/pm.h>
    11.7 -//XXX ??? #include <linux/pci.h>
    11.8 +#include <linux/pci.h>
    11.9  #include <linux/apm_bios.h>
   11.10  #include <linux/kernel.h>
   11.11  #include <linux/string.h>
   11.12 @@ -68,6 +68,8 @@ EXPORT_SYMBOL(pm_power_off);
   11.13  EXPORT_SYMBOL(apm_info);
   11.14  //EXPORT_SYMBOL(gdt);
   11.15  EXPORT_SYMBOL(empty_zero_page);
   11.16 +EXPORT_SYMBOL(phys_to_machine_mapping);
   11.17 +
   11.18  
   11.19  #ifdef CONFIG_DEBUG_IOVIRT
   11.20  EXPORT_SYMBOL(__io_virt_debug);
   11.21 @@ -101,6 +103,16 @@ EXPORT_SYMBOL(__generic_copy_from_user);
   11.22  EXPORT_SYMBOL(__generic_copy_to_user);
   11.23  EXPORT_SYMBOL(strnlen_user);
   11.24  
   11.25 +
   11.26 +EXPORT_SYMBOL(pci_alloc_consistent);
   11.27 +EXPORT_SYMBOL(pci_free_consistent);
   11.28 +
   11.29 +#ifdef CONFIG_PCI
   11.30 +EXPORT_SYMBOL(pcibios_penalize_isa_irq);
   11.31 +EXPORT_SYMBOL(pci_mem_start);
   11.32 +#endif
   11.33 +
   11.34 +
   11.35  #ifdef CONFIG_X86_USE_3DNOW
   11.36  EXPORT_SYMBOL(_mmx_memcpy);
   11.37  EXPORT_SYMBOL(mmx_clear_page);
    12.1 --- a/xenolinux-2.4.25-sparse/arch/xeno/kernel/physirq.c	Tue Mar 23 08:30:15 2004 +0000
    12.2 +++ b/xenolinux-2.4.25-sparse/arch/xeno/kernel/physirq.c	Tue Mar 23 09:33:07 2004 +0000
    12.3 @@ -38,7 +38,9 @@ static unsigned int startup_physirq_even
    12.4      printk("startup_physirq_event %d\n", irq);
    12.5  
    12.6      /*
    12.7 -     * install a interrupt handler for physirq event when called thefirst tim
     12.8 +     * install an interrupt handler for the physirq event when called the
     12.9 +     * first time. We never actually execute the handler, as _EVENT_PHYSIRQ
    12.10 +     * is handled specially in hypervisor.c, but we need to enable the event etc.
   12.11       */
   12.12      if ( !setup_event_handler )
   12.13      {
   12.14 @@ -66,23 +68,51 @@ static unsigned int startup_physirq_even
   12.15      }
   12.16      return 0;
   12.17  }
   12.18 +/*
   12.19 + * This is a dummy interrupt handler.
    12.20 + * It should never be called: events for physical interrupts are handled
    12.21 + * differently in hypervisor.c.
   12.22 + */
   12.23 +static void physirq_interrupt(int irq, void *unused, struct pt_regs *ptregs)
   12.24 +{
    12.25 +    printk("XXX This should never be called!\n");
   12.26 +}
   12.27  
   12.28 +
   12.29 +/*
   12.30 + * IRQ is not needed anymore.
   12.31 + */
   12.32  static void shutdown_physirq_event(unsigned int irq)
   12.33  {
   12.34 +    physdev_op_t op;
   12.35 +    int err;
   12.36  
   12.37 -    /* call xen to free IRQ */
    12.38 +    printk("shutdown_physirq_event called.\n");
   12.39  
   12.40 +    /*
   12.41 +     * tell hypervisor
   12.42 +     */
   12.43 +    op.cmd = PHYSDEVOP_FREE_IRQ;
   12.44 +    op.u.free_irq.irq   = irq;
   12.45 +    if ( (err = HYPERVISOR_physdev_op(&op)) != 0 )
   12.46 +    {
   12.47 +        printk(KERN_ALERT "could not free IRQ %d\n", irq);
   12.48 +        return;
   12.49 +    }
   12.50 +    return;
   12.51  }
   12.52  
   12.53  
   12.54  static void enable_physirq_event(unsigned int irq)
   12.55  {
   12.56 -    /* XXX just enable all interrupts for now */
   12.57 +    /* XXX just enable all phys interrupts for now */
   12.58 +    enable_irq(HYPEREVENT_IRQ(_EVENT_PHYSIRQ));
   12.59  }
   12.60  
   12.61  static void disable_physirq_event(unsigned int irq)
   12.62  {
   12.63 -    /* XXX just disable all interrupts for now */
   12.64 +    /* XXX just disable all phys interrupts for now */
   12.65 +    disable_irq(HYPEREVENT_IRQ(_EVENT_PHYSIRQ));
   12.66  }
   12.67  
   12.68  static void ack_physirq_event(unsigned int irq)
   12.69 @@ -100,6 +130,7 @@ static void end_physirq_event(unsigned i
   12.70  {
   12.71      int err;
   12.72      physdev_op_t op;
   12.73 +
   12.74      /* call hypervisor */
   12.75      op.cmd = PHYSDEVOP_FINISHED_IRQ;
   12.76      op.u.finished_irq.irq   = irq;
   12.77 @@ -123,21 +154,6 @@ static struct hw_interrupt_type physirq_
   12.78  };
   12.79  
   12.80  
   12.81 -/*
   12.82 - * this interrupt handler demuxes the virt phys event and the virt phys 
   12.83 - * bitmask and calls the interrupt handlers for virtualised physical interrupts
   12.84 - */
   12.85 -static void physirq_interrupt(int irq, void *unused, struct pt_regs *ptregs)
   12.86 -{
   12.87 -#if 0
   12.88 -    unsigned long flags;
   12.89 -    int virq;
   12.90 -    local_irq_save(flags);
   12.91 -    do_IRQ(virq);
   12.92 -    local_irq_restore(flags);
   12.93 -#endif
   12.94 -}
   12.95 -
   12.96  
   12.97  void __init physirq_init(void)
   12.98  {
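
The hunk header above truncates the structure name; the handlers collected into this hw_interrupt_type follow the standard 2.4 layout. A plausible reconstruction (the variable and typename strings are guesses; the function pointers come from this file):

    static struct hw_interrupt_type physirq_irq_type = {
        "physirq",                   /* typename (guessed)  */
        startup_physirq_event,
        shutdown_physirq_event,
        enable_physirq_event,
        disable_physirq_event,
        ack_physirq_event,
        end_physirq_event,
        NULL                         /* set_affinity        */
    };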
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/xenolinux-2.4.25-sparse/include/asm-xeno/io.h	Tue Mar 23 09:33:07 2004 +0000
    13.3 @@ -0,0 +1,430 @@
    13.4 +#ifndef _ASM_IO_H
    13.5 +#define _ASM_IO_H
    13.6 +
    13.7 +#include <linux/config.h>
    13.8 +
    13.9 +/*
   13.10 + * This file contains the definitions for the x86 IO instructions
   13.11 + * inb/inw/inl/outb/outw/outl and the "string versions" of the same
   13.12 + * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
   13.13 + * versions of the single-IO instructions (inb_p/inw_p/..).
   13.14 + *
   13.15 + * This file is not meant to be obfuscating: it's just complicated
   13.16 + * to (a) handle it all in a way that makes gcc able to optimize it
   13.17 + * as well as possible and (b) trying to avoid writing the same thing
   13.18 + * over and over again with slight variations and possibly making a
   13.19 + * mistake somewhere.
   13.20 + */
   13.21 +
   13.22 +/*
   13.23 + * Thanks to James van Artsdalen for a better timing-fix than
   13.24 + * the two short jumps: using outb's to a nonexistent port seems
   13.25 + * to guarantee better timings even on fast machines.
   13.26 + *
   13.27 + * On the other hand, I'd like to be sure of a non-existent port:
   13.28 + * I feel a bit unsafe about using 0x80 (should be safe, though)
   13.29 + *
   13.30 + *		Linus
   13.31 + */
   13.32 +
   13.33 + /*
   13.34 +  *  Bit simplified and optimized by Jan Hubicka
   13.35 +  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
   13.36 +  *
   13.37 +  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
   13.38 +  *  isa_read[wl] and isa_write[wl] fixed
   13.39 +  *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   13.40 +  */
   13.41 +
   13.42 +#define IO_SPACE_LIMIT 0xffff
   13.43 +
   13.44 +#define XQUAD_PORTIO_BASE 0xfe400000
   13.45 +#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
   13.46 +#define XQUAD_PORTIO_LEN  0x80000  /* Only remapping first 2 quads */
   13.47 +
   13.48 +#ifdef __KERNEL__
   13.49 +
   13.50 +#include <linux/vmalloc.h>
   13.51 +
   13.52 +/*
   13.53 + * Temporary debugging check to catch old code using
   13.54 + * unmapped ISA addresses. Will be removed in 2.4.
   13.55 + */
   13.56 +#if CONFIG_DEBUG_IOVIRT
   13.57 +  extern void *__io_virt_debug(unsigned long x, const char *file, int line);
   13.58 +  extern unsigned long __io_phys_debug(unsigned long x, const char *file, int line);
   13.59 +  #define __io_virt(x) __io_virt_debug((unsigned long)(x), __FILE__, __LINE__)
   13.60 +//#define __io_phys(x) __io_phys_debug((unsigned long)(x), __FILE__, __LINE__)
   13.61 +#else
   13.62 +  #define __io_virt(x) ((void *)(x))
   13.63 +//#define __io_phys(x) __pa(x)
   13.64 +#endif
   13.65 +
   13.66 +/**
   13.67 + *	virt_to_phys	-	map virtual addresses to physical
   13.68 + *	@address: address to remap
   13.69 + *
   13.70 + *	The returned physical address is the physical (CPU) mapping for
   13.71 + *	the memory address given. It is only valid to use this function on
   13.72 + *	addresses directly mapped or allocated via kmalloc. 
   13.73 + *
   13.74 + *	This function does not give bus mappings for DMA transfers. In
   13.75 + *	almost all conceivable cases a device driver should not be using
   13.76 + *	this function
   13.77 + */
   13.78 + 
   13.79 +static inline unsigned long virt_to_phys(volatile void * address)
   13.80 +{
   13.81 +	return __pa(address);
   13.82 +}
   13.83 +
   13.84 +/**
   13.85 + *	phys_to_virt	-	map physical address to virtual
   13.86 + *	@address: address to remap
   13.87 + *
   13.88 + *	The returned virtual address is a current CPU mapping for
   13.89 + *	the memory address given. It is only valid to use this function on
   13.90 + *	addresses that have a kernel mapping
   13.91 + *
   13.92 + *	This function does not handle bus mappings for DMA transfers. In
   13.93 + *	almost all conceivable cases a device driver should not be using
   13.94 + *	this function
   13.95 + */
   13.96 +
   13.97 +static inline void * phys_to_virt(unsigned long address)
   13.98 +{
   13.99 +	return __va(address);
  13.100 +}
  13.101 +
  13.102 +/*
  13.103 + * Change "struct page" to physical address.
  13.104 + */
  13.105 +#ifdef CONFIG_HIGHMEM64G
  13.106 +#define page_to_phys(page)	((u64)(page - mem_map) << PAGE_SHIFT)
  13.107 +#else
  13.108 +#define page_to_phys(page)	((page - mem_map) << PAGE_SHIFT)
  13.109 +#endif
  13.110 +
  13.111 +extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
  13.112 +
  13.113 +/**
  13.114 + *	ioremap		-	map bus memory into CPU space
  13.115 + *	@offset:	bus address of the memory
  13.116 + *	@size:		size of the resource to map
  13.117 + *
  13.118 + *	ioremap performs a platform specific sequence of operations to
  13.119 + *	make bus memory CPU accessible via the readb/readw/readl/writeb/
  13.120 + *	writew/writel functions and the other mmio helpers. The returned
  13.121 + *	address is not guaranteed to be usable directly as a virtual
  13.122 + *	address. 
  13.123 + */
  13.124 + 
  13.125 +static inline void * ioremap (unsigned long offset, unsigned long size)
  13.126 +{
  13.127 +	return __ioremap(offset, size, 0);
  13.128 +}
  13.129 +
  13.130 +/**
  13.131 + *	ioremap_nocache		-	map bus memory into CPU space
  13.132 + *	@offset:	bus address of the memory
  13.133 + *	@size:		size of the resource to map
  13.134 + *
  13.135 + *	ioremap_nocache performs a platform specific sequence of operations to
  13.136 + *	make bus memory CPU accessible via the readb/readw/readl/writeb/
  13.137 + *	writew/writel functions and the other mmio helpers. The returned
  13.138 + *	address is not guaranteed to be usable directly as a virtual
  13.139 + *	address. 
  13.140 + *
  13.141 + *	This version of ioremap ensures that the memory is marked uncachable
  13.142 + *	on the CPU as well as honouring existing caching rules from things like
  13.143 + *	the PCI bus. Note that there are other caches and buffers on many 
   13.144 + *	busses. In particular, driver authors should read up on PCI writes
  13.145 + *
  13.146 + *	It's useful if some control registers are in such an area and
  13.147 + *	write combining or read caching is not desirable:
  13.148 + */
  13.149 + 
  13.150 +static inline void * ioremap_nocache (unsigned long offset, unsigned long size)
  13.151 +{
  13.152 +        return __ioremap(offset, size, _PAGE_PCD);
  13.153 +}
  13.154 +
  13.155 +extern void iounmap(void *addr);
  13.156 +
  13.157 +/*
  13.158 + * bt_ioremap() and bt_iounmap() are for temporary early boot-time
  13.159 + * mappings, before the real ioremap() is functional.
  13.160 + * A boot-time mapping is currently limited to at most 16 pages.
  13.161 + */
  13.162 +extern void *bt_ioremap(unsigned long offset, unsigned long size);
  13.163 +extern void bt_iounmap(void *addr, unsigned long size);
  13.164 +
  13.165 +/*
  13.166 + * IO bus memory addresses are also 1:1 with the physical address
  13.167 + */
  13.168 +#define virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
  13.169 +#define bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
  13.170 +#define page_to_bus(_x) phys_to_machine(page_to_phys(_x))
  13.171 +
  13.172 +/*
  13.173 + * readX/writeX() are used to access memory mapped devices. On some
  13.174 + * architectures the memory mapped IO stuff needs to be accessed
  13.175 + * differently. On the x86 architecture, we just read/write the
  13.176 + * memory location directly.
  13.177 + */
  13.178 +
  13.179 +#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
  13.180 +#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
  13.181 +#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
  13.182 +#define __raw_readb readb
  13.183 +#define __raw_readw readw
  13.184 +#define __raw_readl readl
  13.185 +
  13.186 +#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
  13.187 +#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
  13.188 +#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
  13.189 +#define __raw_writeb writeb
  13.190 +#define __raw_writew writew
  13.191 +#define __raw_writel writel
  13.192 +
  13.193 +#define memset_io(a,b,c)	__memset(__io_virt(a),(b),(c))
  13.194 +#define memcpy_fromio(a,b,c)	__memcpy((a),__io_virt(b),(c))
  13.195 +#define memcpy_toio(a,b,c)	__memcpy(__io_virt(a),(b),(c))
  13.196 +
  13.197 +/*
  13.198 + * ISA space is 'always mapped' on a typical x86 system, no need to
  13.199 + * explicitly ioremap() it. The fact that the ISA IO space is mapped
  13.200 + * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
  13.201 + * are physical addresses. The following constant pointer can be
  13.202 + * used as the IO-area pointer (it can be iounmapped as well, so the
  13.203 + * analogy with PCI is quite large):
  13.204 + */
  13.205 +#define __ISA_IO_base ((char *)(PAGE_OFFSET))
  13.206 +
  13.207 +#define isa_readb(a) readb(__ISA_IO_base + (a))
  13.208 +#define isa_readw(a) readw(__ISA_IO_base + (a))
  13.209 +#define isa_readl(a) readl(__ISA_IO_base + (a))
  13.210 +#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
  13.211 +#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
  13.212 +#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
  13.213 +#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
  13.214 +#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
  13.215 +#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))
  13.216 +
  13.217 +
  13.218 +/*
  13.219 + * Again, i386 does not require mem IO specific function.
  13.220 + */
  13.221 +
  13.222 +#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),__io_virt(b),(c),(d))
  13.223 +#define isa_eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d))
  13.224 +
  13.225 +/**
  13.226 + *	check_signature		-	find BIOS signatures
  13.227 + *	@io_addr: mmio address to check 
  13.228 + *	@signature:  signature block
  13.229 + *	@length: length of signature
  13.230 + *
  13.231 + *	Perform a signature comparison with the mmio address io_addr. This
  13.232 + *	address should have been obtained by ioremap.
  13.233 + *	Returns 1 on a match.
  13.234 + */
  13.235 + 
  13.236 +static inline int check_signature(unsigned long io_addr,
  13.237 +	const unsigned char *signature, int length)
  13.238 +{
  13.239 +	int retval = 0;
  13.240 +	do {
  13.241 +		if (readb(io_addr) != *signature)
  13.242 +			goto out;
  13.243 +		io_addr++;
  13.244 +		signature++;
  13.245 +		length--;
  13.246 +	} while (length);
  13.247 +	retval = 1;
  13.248 +out:
  13.249 +	return retval;
  13.250 +}
  13.251 +
  13.252 +/**
  13.253 + *	isa_check_signature		-	find BIOS signatures
  13.254 + *	@io_addr: mmio address to check 
  13.255 + *	@signature:  signature block
  13.256 + *	@length: length of signature
  13.257 + *
  13.258 + *	Perform a signature comparison with the ISA mmio address io_addr.
  13.259 + *	Returns 1 on a match.
  13.260 + *
  13.261 + *	This function is deprecated. New drivers should use ioremap and
  13.262 + *	check_signature.
  13.263 + */
  13.264 + 
  13.265 +
  13.266 +static inline int isa_check_signature(unsigned long io_addr,
  13.267 +	const unsigned char *signature, int length)
  13.268 +{
  13.269 +	int retval = 0;
  13.270 +	do {
  13.271 +		if (isa_readb(io_addr) != *signature)
  13.272 +			goto out;
  13.273 +		io_addr++;
  13.274 +		signature++;
  13.275 +		length--;
  13.276 +	} while (length);
  13.277 +	retval = 1;
  13.278 +out:
  13.279 +	return retval;
  13.280 +}
  13.281 +
  13.282 +/*
  13.283 + *	Cache management
  13.284 + *
   13.285 + *	This is needed for two cases
  13.286 + *	1. Out of order aware processors
  13.287 + *	2. Accidentally out of order processors (PPro errata #51)
  13.288 + */
  13.289 + 
  13.290 +#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
  13.291 +
  13.292 +static inline void flush_write_buffers(void)
  13.293 +{
  13.294 +	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
  13.295 +}
  13.296 +
  13.297 +#define dma_cache_inv(_start,_size)		flush_write_buffers()
  13.298 +#define dma_cache_wback(_start,_size)		flush_write_buffers()
  13.299 +#define dma_cache_wback_inv(_start,_size)	flush_write_buffers()
  13.300 +
  13.301 +#else
  13.302 +
  13.303 +/* Nothing to do */
  13.304 +
  13.305 +#define dma_cache_inv(_start,_size)		do { } while (0)
  13.306 +#define dma_cache_wback(_start,_size)		do { } while (0)
  13.307 +#define dma_cache_wback_inv(_start,_size)	do { } while (0)
  13.308 +#define flush_write_buffers()
  13.309 +
  13.310 +#endif
  13.311 +
  13.312 +#endif /* __KERNEL__ */
  13.313 +
  13.314 +#ifdef SLOW_IO_BY_JUMPING
  13.315 +#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
  13.316 +#else
  13.317 +#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
  13.318 +#endif
  13.319 +
  13.320 +#ifdef REALLY_SLOW_IO
  13.321 +#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
  13.322 +#else
  13.323 +#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
  13.324 +#endif
  13.325 +
  13.326 +#ifdef CONFIG_MULTIQUAD
  13.327 +extern void *xquad_portio;    /* Where the IO area was mapped */
  13.328 +#endif /* CONFIG_MULTIQUAD */
  13.329 +
  13.330 +/*
  13.331 + * Talk about misusing macros..
  13.332 + */
  13.333 +#define __OUT1(s,x) \
  13.334 +static inline void out##s(unsigned x value, unsigned short port) {
  13.335 +
  13.336 +#define __OUT2(s,s1,s2) \
  13.337 +__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
  13.338 +
  13.339 +#if defined (CONFIG_MULTIQUAD) && !defined(STANDALONE)
  13.340 +#define __OUTQ(s,ss,x)    /* Do the equivalent of the portio op on quads */ \
  13.341 +static inline void out##ss(unsigned x value, unsigned short port) { \
  13.342 +	if (xquad_portio) \
  13.343 +		write##s(value, (unsigned long) xquad_portio + port); \
  13.344 +	else               /* We're still in early boot, running on quad 0 */ \
  13.345 +		out##ss##_local(value, port); \
  13.346 +} \
  13.347 +static inline void out##ss##_quad(unsigned x value, unsigned short port, int quad) { \
  13.348 +	if (xquad_portio) \
  13.349 +		write##s(value, (unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
  13.350 +			+ port); \
  13.351 +}
  13.352 +
  13.353 +#define __INQ(s,ss)       /* Do the equivalent of the portio op on quads */ \
  13.354 +static inline RETURN_TYPE in##ss(unsigned short port) { \
  13.355 +	if (xquad_portio) \
  13.356 +		return read##s((unsigned long) xquad_portio + port); \
  13.357 +	else               /* We're still in early boot, running on quad 0 */ \
  13.358 +		return in##ss##_local(port); \
  13.359 +} \
  13.360 +static inline RETURN_TYPE in##ss##_quad(unsigned short port, int quad) { \
  13.361 +	if (xquad_portio) \
  13.362 +		return read##s((unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
  13.363 +			+ port); \
  13.364 +	else\
  13.365 +		return 0;\
  13.366 +}
  13.367 +#endif /* CONFIG_MULTIQUAD && !STANDALONE */
  13.368 +
  13.369 +#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
  13.370 +#define __OUT(s,s1,x) \
  13.371 +__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
  13.372 +__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} 
  13.373 +#else
  13.374 +/* Make the default portio routines operate on quad 0 */
  13.375 +#define __OUT(s,s1,x) \
  13.376 +__OUT1(s##_local,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
  13.377 +__OUT1(s##_p_local,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
  13.378 +__OUTQ(s,s,x) \
  13.379 +__OUTQ(s,s##_p,x) 
  13.380 +#endif /* !CONFIG_MULTIQUAD || STANDALONE */
  13.381 +
  13.382 +#define __IN1(s) \
  13.383 +static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
  13.384 +
  13.385 +#define __IN2(s,s1,s2) \
  13.386 +__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
  13.387 +
  13.388 +#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
  13.389 +#define __IN(s,s1,i...) \
  13.390 +__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
  13.391 +__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } 
  13.392 +#else
  13.393 +/* Make the default portio routines operate on quad 0 */
  13.394 +#define __IN(s,s1,i...) \
  13.395 +__IN1(s##_local) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
  13.396 +__IN1(s##_p_local) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
  13.397 +__INQ(s,s) \
  13.398 +__INQ(s,s##_p) 
  13.399 +#endif /* !CONFIG_MULTIQUAD || STANDALONE */
  13.400 +
  13.401 +#define __INS(s) \
  13.402 +static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
  13.403 +{ __asm__ __volatile__ ("rep ; ins" #s \
  13.404 +: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
  13.405 +
  13.406 +#define __OUTS(s) \
  13.407 +static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
  13.408 +{ __asm__ __volatile__ ("rep ; outs" #s \
  13.409 +: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
  13.410 +
  13.411 +#define RETURN_TYPE unsigned char
  13.412 +__IN(b,"")
  13.413 +#undef RETURN_TYPE
  13.414 +#define RETURN_TYPE unsigned short
  13.415 +__IN(w,"")
  13.416 +#undef RETURN_TYPE
  13.417 +#define RETURN_TYPE unsigned int
  13.418 +__IN(l,"")
  13.419 +#undef RETURN_TYPE
  13.420 +
  13.421 +__OUT(b,"b",char)
  13.422 +__OUT(w,"w",short)
  13.423 +__OUT(l,,int)
  13.424 +
  13.425 +__INS(b)
  13.426 +__INS(w)
  13.427 +__INS(l)
  13.428 +
  13.429 +__OUTS(b)
  13.430 +__OUTS(w)
  13.431 +__OUTS(l)
  13.432 +
  13.433 +#endif
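
This header replaces the asm-i386/io.h symlink removed from mkbuildtree below; the Xen-specific change is that virt_to_bus/bus_to_virt/page_to_bus go through phys_to_machine/machine_to_phys instead of being identity mappings. The __IN/__OUT machinery at the end is unchanged from stock i386; hand-expanding __IN(b,"") on a non-MULTIQUAD build, for instance, yields roughly:

    static inline unsigned char inb(unsigned short port)
    {
        unsigned char _v;
        __asm__ __volatile__ ("inb %w1,%0" : "=a" (_v) : "Nd" (port));
        return _v;
    }

    /* pausing variant: appends __FULL_SLOW_DOWN_IO (an outb to port 0x80) */
    static inline unsigned char inb_p(unsigned short port)
    {
        unsigned char _v;
        __asm__ __volatile__ ("inb %w1,%0" "\noutb %%al,$0x80"
                              : "=a" (_v) : "Nd" (port));
        return _v;
    }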
    14.1 --- a/xenolinux-2.4.25-sparse/mkbuildtree	Tue Mar 23 08:30:15 2004 +0000
    14.2 +++ b/xenolinux-2.4.25-sparse/mkbuildtree	Tue Mar 23 09:33:07 2004 +0000
    14.3 @@ -137,8 +137,7 @@ ln -sf ../asm-i386/hardirq.h
    14.4  ln -sf ../asm-i386/hdreg.h 
    14.5  ln -sf ../asm-i386/i387.h 
    14.6  ln -sf ../asm-i386/ide.h 
    14.7 -ln -sf ../asm-i386/init.h 
    14.8 -ln -sf ../asm-i386/io.h
    14.9 +ln -sf ../asm-i386/init.h
   14.10  ln -sf ../asm-i386/io_apic.h
   14.11  ln -sf ../asm-i386/ioctl.h
   14.12  ln -sf ../asm-i386/ioctls.h