ia64/xen-unstable

changeset 1351:5e7d0e24bcca

bitkeeper revision 1.891.1.3 (409a27e1UZRfPLA6Sz-02GAVb3IlDw)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xeno
author kaf24@scramble.cl.cam.ac.uk
date Thu May 06 11:56:17 2004 +0000 (2004-05-06)
parents d7147f016bfd 430798be9fca
children 74d515393e65
files tools/xend/lib/domain_controller.h xen/common/memory.c xen/include/hypervisor-ifs/hypervisor-if.h xenolinux-2.4.26-sparse/arch/xen/defconfig-physdev xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/common.h xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/interface.c xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/main.c xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c xenolinux-2.4.26-sparse/arch/xen/mm/hypervisor.c xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h
line diff
     1.1 --- a/tools/xend/lib/domain_controller.h	Thu May 06 11:55:12 2004 +0000
     1.2 +++ b/tools/xend/lib/domain_controller.h	Thu May 06 11:56:17 2004 +0000
     1.3 @@ -342,6 +342,7 @@ typedef struct {
     1.4      unsigned int handle;
     1.5      unsigned int status;
     1.6      unsigned int evtchn; /* status == NETIF_INTERFACE_STATUS_CONNECTED */
     1.7 +    u8           mac[6]; /* status == NETIF_INTERFACE_STATUS_CONNECTED */
     1.8  } netif_fe_interface_status_changed_t;
     1.9  
    1.10  /*
    1.11 @@ -373,7 +374,8 @@ typedef struct {
    1.12   */
    1.13  typedef struct {
    1.14      unsigned int  handle;
    1.15 -    unsigned long shmem_frame;
    1.16 +    unsigned long tx_shmem_frame;
    1.17 +    unsigned long rx_shmem_frame;
    1.18  } netif_fe_interface_connect_t;
    1.19  
    1.20  /*
    1.21 @@ -434,6 +436,7 @@ typedef struct {
    1.22      /* IN */
    1.23      domid_t        domid;             /* Domain attached to new interface.   */
    1.24      unsigned int   netif_handle;      /* Domain-specific interface handle.   */
    1.25 +    u8             mac[6];
    1.26      /* OUT */
    1.27      unsigned int   status;
    1.28  } netif_be_create_t; 
    1.29 @@ -463,7 +466,9 @@ typedef struct {
    1.30      domid_t        domid;             /* Domain attached to new interface.   */
    1.31      unsigned int   netif_handle;      /* Domain-specific interface handle.   */
    1.32      unsigned int   evtchn;            /* Event channel for notifications.    */
    1.33 -    unsigned long  shmem_frame;       /* Page cont. shared comms window.     */
    1.34 +    unsigned long  tx_shmem_frame;    /* Page cont. tx shared comms window.  */
    1.35 +    unsigned long  rx_shmem_frame;    /* Page cont. rx shared comms window.  */
    1.36 +    unsigned long  shmem_frame;       
    1.37      /* OUT */
    1.38      unsigned int   status;
    1.39  } netif_be_connect_t; 
     2.1 --- a/xen/common/memory.c	Thu May 06 11:55:12 2004 +0000
     2.2 +++ b/xen/common/memory.c	Thu May 06 11:56:17 2004 +0000
     2.3 @@ -915,7 +915,8 @@ static int do_extended_command(unsigned 
     2.4          break;
     2.5  
     2.6      case MMUEXT_SET_SUBJECTDOM_H:
     2.7 -        percpu_info[cpu].subject_id |= ((domid_t)((ptr&~0xFFFF)|(val>>16)))<<32;
     2.8 +        percpu_info[cpu].subject_id |= 
     2.9 +            ((domid_t)((ptr&~0xFFFF)|(val>>16)))<<32;
    2.10  
    2.11          if ( !IS_PRIV(current) )
    2.12          {
    2.13 @@ -939,6 +940,25 @@ static int do_extended_command(unsigned 
    2.14          }
    2.15          break;
    2.16  
    2.17 +    case MMUEXT_REASSIGN_PAGE:
    2.18 +        if ( !IS_PRIV(current) )
    2.19 +        {
    2.20 +            MEM_LOG("Dom %llu has no privilege to reassign page ownership",
    2.21 +                    current->domain);
    2.22 +            okay = 0;
    2.23 +        }
    2.24 +        else if ( percpu_info[cpu].gps != NULL )
    2.25 +        {
    2.26 +            page->u.domain = percpu_info[cpu].gps;
    2.27 +        }
    2.28 +        break;
    2.29 +
    2.30 +    case MMUEXT_RESET_SUBJECTDOM:
    2.31 +        if ( percpu_info[cpu].gps != NULL )
    2.32 +            put_task_struct(percpu_info[cpu].gps);
    2.33 +        percpu_info[cpu].gps = percpu_info[cpu].pts = NULL;
    2.34 +        break;
    2.35 +
    2.36      default:
    2.37          MEM_LOG("Invalid extended pt command 0x%08lx", val & MMUEXT_CMD_MASK);
    2.38          okay = 0;
     3.1 --- a/xen/include/hypervisor-ifs/hypervisor-if.h	Thu May 06 11:55:12 2004 +0000
     3.2 +++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Thu May 06 11:56:17 2004 +0000
     3.3 @@ -127,6 +127,12 @@
     3.4   *   (ptr[31:15],val[31:15]) -- dom[63:32]
     3.5   *   NB. This command must be immediately preceded by SET_SUBJECTDOM_L.
     3.6   * 
     3.7 + *   val[7:0] == MMUEXT_REASSIGN_PAGE:
     3.8 + *   ptr[:2]  -- machine address within page to be reassigned to the GPS.
     3.9 + * 
    3.10 + *   val[7:0] == MMUEXT_RESET_SUBJECTDOM:
    3.11 + *   Resets both the GPS and the PTS to their defaults (i.e., calling domain).
    3.12 + * 
    3.13   * Notes on constraints on the above arguments:
    3.14   *  [1] The page frame containing the machine address must belong to the PTS.
    3.15   *  [2] If the PTE is valid (i.e., bit 0 is set) then the specified page frame
    3.16 @@ -151,6 +157,8 @@
    3.17  #define MMUEXT_SET_SUBJECTDOM_L  9 /* (ptr[31:15],val[31:15]) = dom[31:0]    */
    3.18  #define MMUEXT_SET_SUBJECTDOM_H 10 /* (ptr[31:15],val[31:15]) = dom[63:32]   */
    3.19  #define SET_PAGETABLE_SUBJECTDOM (1<<14) /* OR into 'val' arg of SUBJECTDOM_H*/
    3.20 +#define MMUEXT_REASSIGN_PAGE    11
    3.21 +#define MMUEXT_RESET_SUBJECTDOM 12
    3.22  #define MMUEXT_CMD_MASK        255
    3.23  #define MMUEXT_CMD_SHIFT         8
    3.24  
     4.1 --- a/xenolinux-2.4.26-sparse/arch/xen/defconfig-physdev	Thu May 06 11:55:12 2004 +0000
     4.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/defconfig-physdev	Thu May 06 11:56:17 2004 +0000
     4.3 @@ -89,19 +89,7 @@ CONFIG_BINFMT_ELF=y
     4.4  #
     4.5  # Parallel port support
     4.6  #
     4.7 -CONFIG_PARPORT=y
     4.8 -CONFIG_PARPORT_PC=y
     4.9 -# CONFIG_PARPORT_PC_FIFO is not set
    4.10 -# CONFIG_PARPORT_PC_SUPERIO is not set
    4.11 -# CONFIG_PARPORT_PC_PCMCIA is not set
    4.12 -# CONFIG_PARPORT_AMIGA is not set
    4.13 -# CONFIG_PARPORT_MFC3 is not set
    4.14 -# CONFIG_PARPORT_ATARI is not set
    4.15 -# CONFIG_PARPORT_GSC is not set
    4.16 -# CONFIG_PARPORT_SUNBPP is not set
    4.17 -# CONFIG_PARPORT_IP22 is not set
    4.18 -# CONFIG_PARPORT_OTHER is not set
    4.19 -CONFIG_PARPORT_1284=y
    4.20 +# CONFIG_PARPORT is not set
    4.21  
    4.22  #
    4.23  # Plug and Play configuration
    4.24 @@ -112,7 +100,7 @@ CONFIG_PNP=y
    4.25  #
    4.26  # Block devices
    4.27  #
    4.28 -CONFIG_BLK_DEV_FD=y
    4.29 +# CONFIG_BLK_DEV_FD is not set
    4.30  # CONFIG_BLK_DEV_XD is not set
    4.31  # CONFIG_PARIDE is not set
    4.32  # CONFIG_BLK_CPQ_DA is not set
    4.33 @@ -131,14 +119,14 @@ CONFIG_BLK_DEV_INITRD=y
    4.34  #
    4.35  # Multi-device support (RAID and LVM)
    4.36  #
    4.37 -CONFIG_MD=y
    4.38 -CONFIG_BLK_DEV_MD=y
    4.39 -CONFIG_MD_LINEAR=y
    4.40 -CONFIG_MD_RAID0=y
    4.41 -CONFIG_MD_RAID1=y
    4.42 -CONFIG_MD_RAID5=y
    4.43 -CONFIG_MD_MULTIPATH=y
    4.44 -CONFIG_BLK_DEV_LVM=y
    4.45 +# CONFIG_MD is not set
    4.46 +# CONFIG_BLK_DEV_MD is not set
    4.47 +# CONFIG_MD_LINEAR is not set
    4.48 +# CONFIG_MD_RAID0 is not set
    4.49 +# CONFIG_MD_RAID1 is not set
    4.50 +# CONFIG_MD_RAID5 is not set
    4.51 +# CONFIG_MD_MULTIPATH is not set
    4.52 +# CONFIG_BLK_DEV_LVM is not set
    4.53  
    4.54  #
    4.55  # Networking options
    4.56 @@ -234,7 +222,7 @@ CONFIG_IP_NF_TARGET_ULOG=y
    4.57  #
    4.58  # CONFIG_DEV_APPLETALK is not set
    4.59  # CONFIG_DECNET is not set
    4.60 -# CONFIG_BRIDGE is not set
    4.61 +CONFIG_BRIDGE=y
    4.62  # CONFIG_X25 is not set
    4.63  # CONFIG_LAPB is not set
    4.64  # CONFIG_LLC is not set
    4.65 @@ -380,14 +368,7 @@ CONFIG_CHR_DEV_SG=y
    4.66  # CONFIG_SCSI_AHA1740 is not set
    4.67  CONFIG_SCSI_AACRAID=y
    4.68  # CONFIG_SCSI_AIC7XXX is not set
    4.69 -CONFIG_SCSI_AIC79XX=y
    4.70 -CONFIG_AIC79XX_CMDS_PER_DEVICE=32
    4.71 -CONFIG_AIC79XX_RESET_DELAY_MS=15000
    4.72 -# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
    4.73 -# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
    4.74 -CONFIG_AIC79XX_DEBUG_ENABLE=y
    4.75 -CONFIG_AIC79XX_DEBUG_MASK=0
    4.76 -# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
    4.77 +# CONFIG_SCSI_AIC79XX is not set
    4.78  # CONFIG_SCSI_AIC7XXX_OLD is not set
    4.79  # CONFIG_SCSI_DPT_I2O is not set
    4.80  # CONFIG_SCSI_ADVANSYS is not set
    4.81 @@ -397,9 +378,9 @@ CONFIG_SCSI_MEGARAID=y
    4.82  # CONFIG_SCSI_MEGARAID2 is not set
    4.83  CONFIG_SCSI_BUSLOGIC=y
    4.84  # CONFIG_SCSI_OMIT_FLASHPOINT is not set
    4.85 -CONFIG_SCSI_CPQFCTS=y
    4.86 +# CONFIG_SCSI_CPQFCTS is not set
    4.87  # CONFIG_SCSI_DMX3191D is not set
    4.88 -CONFIG_SCSI_DTC3280=y
    4.89 +# CONFIG_SCSI_DTC3280 is not set
    4.90  # CONFIG_SCSI_EATA is not set
    4.91  # CONFIG_SCSI_EATA_DMA is not set
    4.92  # CONFIG_SCSI_EATA_PIO is not set
    4.93 @@ -409,15 +390,11 @@ CONFIG_SCSI_DTC3280=y
    4.94  # CONFIG_SCSI_IPS is not set
    4.95  # CONFIG_SCSI_INITIO is not set
    4.96  # CONFIG_SCSI_INIA100 is not set
    4.97 -# CONFIG_SCSI_PPA is not set
    4.98 -# CONFIG_SCSI_IMM is not set
    4.99  # CONFIG_SCSI_NCR53C406A is not set
   4.100  # CONFIG_SCSI_NCR53C7xx is not set
   4.101 -CONFIG_SCSI_SYM53C8XX_2=y
   4.102 -CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
   4.103 -CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
   4.104 -CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
   4.105 -# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
   4.106 +# CONFIG_SCSI_SYM53C8XX_2 is not set
   4.107 +# CONFIG_SCSI_NCR53C8XX is not set
   4.108 +# CONFIG_SCSI_SYM53C8XX is not set
   4.109  # CONFIG_SCSI_PAS16 is not set
   4.110  # CONFIG_SCSI_PCI2000 is not set
   4.111  # CONFIG_SCSI_PCI2220I is not set
   4.112 @@ -510,9 +487,7 @@ CONFIG_PCNET32=y
   4.113  # CONFIG_APRICOT is not set
   4.114  # CONFIG_B44 is not set
   4.115  # CONFIG_CS89x0 is not set
   4.116 -CONFIG_TULIP=y
   4.117 -# CONFIG_TULIP_MWI is not set
   4.118 -# CONFIG_TULIP_MMIO is not set
   4.119 +# CONFIG_TULIP is not set
   4.120  # CONFIG_DE4X5 is not set
   4.121  # CONFIG_DGRS is not set
   4.122  # CONFIG_DM9102 is not set
   4.123 @@ -545,8 +520,7 @@ CONFIG_TULIP=y
   4.124  #
   4.125  # Ethernet (1000 Mbit)
   4.126  #
   4.127 -CONFIG_ACENIC=y
   4.128 -# CONFIG_ACENIC_OMIT_TIGON_I is not set
   4.129 +# CONFIG_ACENIC is not set
   4.130  # CONFIG_DL2K is not set
   4.131  CONFIG_E1000=y
   4.132  # CONFIG_E1000_NAPI is not set
   4.133 @@ -621,9 +595,6 @@ CONFIG_VT_CONSOLE=y
   4.134  # CONFIG_SERIAL_NONSTANDARD is not set
   4.135  CONFIG_UNIX98_PTYS=y
   4.136  CONFIG_UNIX98_PTY_COUNT=256
   4.137 -# CONFIG_PRINTER is not set
   4.138 -# CONFIG_PPDEV is not set
   4.139 -# CONFIG_TIPAR is not set
   4.140  
   4.141  #
   4.142  # I2C support
   4.143 @@ -869,107 +840,7 @@ CONFIG_DUMMY_CONSOLE=y
   4.144  #
   4.145  # USB support
   4.146  #
   4.147 -CONFIG_USB=y
   4.148 -CONFIG_USB_DEBUG=y
   4.149 -
   4.150 -#
   4.151 -# Miscellaneous USB options
   4.152 -#
   4.153 -# CONFIG_USB_DEVICEFS is not set
   4.154 -# CONFIG_USB_BANDWIDTH is not set
   4.155 -
   4.156 -#
   4.157 -# USB Host Controller Drivers
   4.158 -#
   4.159 -# CONFIG_USB_EHCI_HCD is not set
   4.160 -CONFIG_USB_UHCI=y
   4.161 -# CONFIG_USB_UHCI_ALT is not set
   4.162 -CONFIG_USB_OHCI=y
   4.163 -# CONFIG_USB_SL811HS_ALT is not set
   4.164 -# CONFIG_USB_SL811HS is not set
   4.165 -
   4.166 -#
   4.167 -# USB Device Class drivers
   4.168 -#
   4.169 -# CONFIG_USB_AUDIO is not set
   4.170 -# CONFIG_USB_EMI26 is not set
   4.171 -# CONFIG_USB_BLUETOOTH is not set
   4.172 -# CONFIG_USB_MIDI is not set
   4.173 -# CONFIG_USB_STORAGE is not set
   4.174 -# CONFIG_USB_STORAGE_DEBUG is not set
   4.175 -# CONFIG_USB_STORAGE_DATAFAB is not set
   4.176 -# CONFIG_USB_STORAGE_FREECOM is not set
   4.177 -# CONFIG_USB_STORAGE_ISD200 is not set
   4.178 -# CONFIG_USB_STORAGE_DPCM is not set
   4.179 -# CONFIG_USB_STORAGE_HP8200e is not set
   4.180 -# CONFIG_USB_STORAGE_SDDR09 is not set
   4.181 -# CONFIG_USB_STORAGE_SDDR55 is not set
   4.182 -# CONFIG_USB_STORAGE_JUMPSHOT is not set
   4.183 -# CONFIG_USB_ACM is not set
   4.184 -# CONFIG_USB_PRINTER is not set
   4.185 -
   4.186 -#
   4.187 -# USB Human Interface Devices (HID)
   4.188 -#
   4.189 -# CONFIG_USB_HID is not set
   4.190 -
   4.191 -#
   4.192 -#     Input core support is needed for USB HID input layer or HIDBP support
   4.193 -#
   4.194 -# CONFIG_USB_HIDINPUT is not set
   4.195 -# CONFIG_USB_HIDDEV is not set
   4.196 -# CONFIG_USB_KBD is not set
   4.197 -# CONFIG_USB_MOUSE is not set
   4.198 -# CONFIG_USB_AIPTEK is not set
   4.199 -# CONFIG_USB_WACOM is not set
   4.200 -# CONFIG_USB_KBTAB is not set
   4.201 -# CONFIG_USB_POWERMATE is not set
   4.202 -
   4.203 -#
   4.204 -# USB Imaging devices
   4.205 -#
   4.206 -# CONFIG_USB_DC2XX is not set
   4.207 -# CONFIG_USB_MDC800 is not set
   4.208 -# CONFIG_USB_SCANNER is not set
   4.209 -# CONFIG_USB_MICROTEK is not set
   4.210 -# CONFIG_USB_HPUSBSCSI is not set
   4.211 -
   4.212 -#
   4.213 -# USB Multimedia devices
   4.214 -#
   4.215 -
   4.216 -#
   4.217 -#   Video4Linux support is needed for USB Multimedia device support
   4.218 -#
   4.219 -
   4.220 -#
   4.221 -# USB Network adaptors
   4.222 -#
   4.223 -# CONFIG_USB_PEGASUS is not set
   4.224 -# CONFIG_USB_RTL8150 is not set
   4.225 -# CONFIG_USB_KAWETH is not set
   4.226 -# CONFIG_USB_CATC is not set
   4.227 -# CONFIG_USB_CDCETHER is not set
   4.228 -# CONFIG_USB_USBNET is not set
   4.229 -
   4.230 -#
   4.231 -# USB port drivers
   4.232 -#
   4.233 -# CONFIG_USB_USS720 is not set
   4.234 -
   4.235 -#
   4.236 -# USB Serial Converter support
   4.237 -#
   4.238 -# CONFIG_USB_SERIAL is not set
   4.239 -
   4.240 -#
   4.241 -# USB Miscellaneous drivers
   4.242 -#
   4.243 -# CONFIG_USB_RIO500 is not set
   4.244 -# CONFIG_USB_AUERSWALD is not set
   4.245 -# CONFIG_USB_TIGL is not set
   4.246 -# CONFIG_USB_BRLVGER is not set
   4.247 -# CONFIG_USB_LCD is not set
   4.248 +# CONFIG_USB is not set
   4.249  
   4.250  #
   4.251  # Support for USB gadgets
     5.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/common.h	Thu May 06 11:55:12 2004 +0000
     5.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/common.h	Thu May 06 11:56:17 2004 +0000
     5.3 @@ -16,6 +16,7 @@
     5.4  #include <asm/ctrl_if.h>
     5.5  #include <asm/io.h>
     5.6  #include "../netif.h"
     5.7 +#include "../../../../../net/bridge/br_private.h"
     5.8  
     5.9  #ifndef NDEBUG
    5.10  #define ASSERT(_p) \
    5.11 @@ -28,7 +29,7 @@
    5.12  #define DPRINTK(_f, _a...) ((void)0)
    5.13  #endif
    5.14  
    5.15 -typedef struct {
    5.16 +typedef struct netif_st {
    5.17      /* Unique identifier for this interface. */
    5.18      domid_t          domid;
    5.19      unsigned int     handle;
    5.20 @@ -49,13 +50,7 @@ typedef struct {
    5.21      NETIF_RING_IDX tx_req_cons;
    5.22      NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
    5.23  
    5.24 -    /* Usage accounting */
    5.25 -    long long total_bytes_sent;
    5.26 -    long long total_bytes_received;
    5.27 -    long long total_packets_sent;
    5.28 -    long long total_packets_received;
    5.29 -
    5.30 -    /* Trasnmit shaping: allow 'credit_bytes' every 'credit_usec'. */
    5.31 +    /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
    5.32      unsigned long   credit_bytes;
    5.33      unsigned long   credit_usec;
    5.34      unsigned long   remaining_credit;
    5.35 @@ -72,7 +67,8 @@ typedef struct {
    5.36      struct list_head list;  /* scheduling list */
    5.37      atomic_t         refcnt;
    5.38      spinlock_t       rx_lock, tx_lock;
    5.39 -    unsigned char    vmac[ETH_ALEN];
    5.40 +    struct net_device *dev;
    5.41 +    struct net_device_stats stats;
    5.42  } netif_t;
    5.43  
    5.44  void netif_create(netif_be_create_t *create);
    5.45 @@ -93,6 +89,8 @@ void netif_ctrlif_init(void);
    5.46  
    5.47  void netif_deschedule(netif_t *netif);
    5.48  
    5.49 +int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
    5.50 +struct net_device_stats *netif_be_get_stats(struct net_device *dev);
    5.51  void netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
    5.52  
    5.53  #endif /* __NETIF__BACKEND__COMMON_H__ */
     6.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/interface.c	Thu May 06 11:55:12 2004 +0000
     6.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/interface.c	Thu May 06 11:56:17 2004 +0000
     6.3 @@ -12,8 +12,8 @@
     6.4  #define NETIF_HASH(_d,_h) \
     6.5      (((int)(_d)^(int)((_d)>>32)^(int)(_h))&(NETIF_HASHSZ-1))
     6.6  
     6.7 -static kmem_cache_t *netif_cachep;
     6.8 -static netif_t      *netif_hash[NETIF_HASHSZ];
     6.9 +static netif_t *netif_hash[NETIF_HASHSZ];
    6.10 +static struct net_device *bridge_dev;
    6.11  
    6.12  netif_t *netif_find_by_handle(domid_t domid, unsigned int handle)
    6.13  {
    6.14 @@ -35,7 +35,9 @@ void __netif_disconnect_complete(netif_t
    6.15       * must still be notified to the remote driver.
    6.16       */
    6.17      unbind_evtchn_from_irq(netif->evtchn);
    6.18 -    vfree(netif->net_ring_base);
    6.19 +    vfree(netif->tx); /* Frees netif->rx as well. */
    6.20 +    (void)br_del_if((struct net_bridge *)bridge_dev->priv, netif->dev);
    6.21 +    (void)dev_close(netif->dev);
    6.22  
    6.23      /* Construct the deferred response message. */
    6.24      cmsg.type         = CMSG_NETIF_BE;
    6.25 @@ -66,24 +68,32 @@ void __netif_disconnect_complete(netif_t
    6.26  
    6.27  void netif_create(netif_be_create_t *create)
    6.28  {
    6.29 -    domid_t       domid  = create->domid;
    6.30 -    unsigned int  handle = create->netif_handle;
    6.31 -    netif_t     **pnetif, *netif;
    6.32 +    domid_t            domid  = create->domid;
    6.33 +    unsigned int       handle = create->netif_handle;
    6.34 +    struct net_device *dev;
    6.35 +    netif_t          **pnetif, *netif;
    6.36  
    6.37 -    if ( (netif = kmem_cache_alloc(netif_cachep, GFP_ATOMIC)) == NULL )
    6.38 +    dev = alloc_netdev(sizeof(netif_t), "netif-be-%d", ether_setup);
    6.39 +    if ( dev == NULL )
    6.40      {
    6.41          DPRINTK("Could not create netif: out of memory\n");
    6.42          create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
    6.43          return;
    6.44      }
    6.45  
    6.46 +    netif = dev->priv;
    6.47      memset(netif, 0, sizeof(*netif));
    6.48      netif->domid  = domid;
    6.49      netif->handle = handle;
    6.50      netif->status = DISCONNECTED;
    6.51 -    spin_lock_init(&netif->vbd_lock);
    6.52 -    spin_lock_init(&netif->net_ring_lock);
    6.53 +    spin_lock_init(&netif->rx_lock);
    6.54 +    spin_lock_init(&netif->tx_lock);
    6.55      atomic_set(&netif->refcnt, 0);
    6.56 +    netif->dev = dev;
    6.57 +
    6.58 +    netif->credit_bytes = netif->remaining_credit = ~0UL;
    6.59 +    netif->credit_usec  = 0UL;
    6.60 +    /*init_ac_timer(&new_vif->credit_timeout);*/
    6.61  
    6.62      pnetif = &netif_hash[NETIF_HASH(domid, handle)];
    6.63      while ( *pnetif != NULL )
    6.64 @@ -92,12 +102,24 @@ void netif_create(netif_be_create_t *cre
    6.65          {
    6.66              DPRINTK("Could not create netif: already exists\n");
    6.67              create->status = NETIF_BE_STATUS_INTERFACE_EXISTS;
    6.68 -            kmem_cache_free(netif_cachep, netif);
    6.69 +            kfree(dev);
    6.70              return;
    6.71          }
    6.72          pnetif = &(*pnetif)->hash_next;
    6.73      }
    6.74  
    6.75 +    dev->hard_start_xmit = netif_be_start_xmit;
    6.76 +    dev->get_stats       = netif_be_get_stats;
    6.77 +    memcpy(dev->dev_addr, create->mac, ETH_ALEN);
    6.78 +    
    6.79 +    if ( register_netdev(dev) != 0 )
    6.80 +    {
    6.81 +        DPRINTK("Could not register new net device\n");
    6.82 +        create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
    6.83 +        kfree(dev);
    6.84 +        return;
    6.85 +    }
    6.86 +
    6.87      netif->hash_next = *pnetif;
    6.88      *pnetif = netif;
    6.89  
    6.90 @@ -132,8 +154,8 @@ void netif_destroy(netif_be_destroy_t *d
    6.91  
    6.92   destroy:
    6.93      *pnetif = netif->hash_next;
    6.94 -    destroy_all_vbds(netif);
    6.95 -    kmem_cache_free(netif_cachep, netif);
    6.96 +    unregister_netdev(netif->dev);
    6.97 +    kfree(netif->dev);
    6.98      destroy->status = NETIF_BE_STATUS_OKAY;
    6.99  }
   6.100  
   6.101 @@ -142,11 +164,13 @@ void netif_connect(netif_be_connect_t *c
   6.102      domid_t       domid  = connect->domid;
   6.103      unsigned int  handle = connect->netif_handle;
   6.104      unsigned int  evtchn = connect->evtchn;
   6.105 -    unsigned long shmem_frame = connect->shmem_frame;
   6.106 +    unsigned long tx_shmem_frame = connect->tx_shmem_frame;
   6.107 +    unsigned long rx_shmem_frame = connect->rx_shmem_frame;
   6.108      struct vm_struct *vma;
   6.109      pgprot_t      prot;
   6.110      int           error;
   6.111      netif_t      *netif;
   6.112 +    struct net_device *eth0_dev;
   6.113  
   6.114      netif = netif_find_by_handle(domid, handle);
   6.115      if ( unlikely(netif == NULL) )
   6.116 @@ -157,16 +181,27 @@ void netif_connect(netif_be_connect_t *c
   6.117          return;
   6.118      }
   6.119  
   6.120 -    if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
   6.121 +    if ( netif->status != DISCONNECTED )
   6.122 +    {
   6.123 +        connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
   6.124 +        return;
   6.125 +    }
   6.126 +
   6.127 +    if ( (vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP)) == NULL )
   6.128      {
   6.129          connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
   6.130          return;
   6.131      }
   6.132  
   6.133      prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
   6.134 -    error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
   6.135 -                                    shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
   6.136 -                                    prot, domid);
   6.137 +    error  = direct_remap_area_pages(&init_mm, 
   6.138 +                                     VMALLOC_VMADDR(vma->addr),
   6.139 +                                     tx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
   6.140 +                                     prot, domid);
   6.141 +    error |= direct_remap_area_pages(&init_mm, 
   6.142 +                                     VMALLOC_VMADDR(vma->addr) + PAGE_SIZE,
   6.143 +                                     rx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
   6.144 +                                     prot, domid);
   6.145      if ( error != 0 )
   6.146      {
   6.147          if ( error == -ENOMEM )
   6.148 @@ -179,21 +214,27 @@ void netif_connect(netif_be_connect_t *c
   6.149          return;
   6.150      }
   6.151  
   6.152 -    if ( netif->status != DISCONNECTED )
   6.153 -    {
   6.154 -        connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
   6.155 -        vfree(vma->addr);
   6.156 -        return;
   6.157 -    }
   6.158 -
   6.159 -    netif->evtchn        = evtchn;
   6.160 -    netif->irq           = bind_evtchn_to_irq(evtchn);
   6.161 -    netif->shmem_frame   = shmem_frame;
   6.162 -    netif->net_ring_base = (netif_ring_t *)vma->addr;
   6.163 -    netif->status        = CONNECTED;
   6.164 +    netif->evtchn         = evtchn;
   6.165 +    netif->irq            = bind_evtchn_to_irq(evtchn);
   6.166 +    netif->tx_shmem_frame = tx_shmem_frame;
   6.167 +    netif->rx_shmem_frame = rx_shmem_frame;
   6.168 +    netif->tx             = 
   6.169 +        (netif_tx_interface_t *)vma->addr;
   6.170 +    netif->rx             = 
   6.171 +        (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
   6.172 +    netif->status         = CONNECTED;
   6.173      netif_get(netif);
   6.174  
   6.175 -    request_irq(netif->irq, netif_be_int, 0, "netif-backend", netif);
   6.176 +    (void)dev_open(netif->dev);
   6.177 +    (void)br_add_if((struct net_bridge *)bridge_dev->priv, netif->dev);
   6.178 +    /* At this point we try to ensure that eth0 is attached to the bridge. */
   6.179 +    if ( (eth0_dev = __dev_get_by_name("eth0")) != NULL )
   6.180 +    {
   6.181 +        (void)dev_open(eth0_dev);
   6.182 +        (void)br_add_if((struct net_bridge *)bridge_dev->priv, eth0_dev);
   6.183 +    }
   6.184 +    (void)request_irq(netif->irq, netif_be_int, 0, "netif-backend", netif);
   6.185 +    netif_start_queue(netif->dev);
   6.186  
   6.187      connect->status = NETIF_BE_STATUS_OKAY;
   6.188  }
   6.189 @@ -218,6 +259,7 @@ int netif_disconnect(netif_be_disconnect
   6.190          netif->status = DISCONNECTING;
   6.191          netif->disconnect_rspid = rsp_id;
   6.192          wmb(); /* Let other CPUs see the status change. */
   6.193 +        netif_stop_queue(netif->dev);
   6.194          free_irq(netif->irq, NULL);
   6.195          netif_deschedule(netif);
   6.196          netif_put(netif);
   6.197 @@ -226,105 +268,11 @@ int netif_disconnect(netif_be_disconnect
   6.198      return 0; /* Caller should not send response message. */
   6.199  }
   6.200  
   6.201 -net_vif_t *create_net_vif(domid_t dom)
   6.202 -{
   6.203 -    unsigned int idx;
   6.204 -    net_vif_t *new_vif = NULL;
   6.205 -    net_ring_t *new_ring = NULL;
   6.206 -    struct task_struct *p = NULL;
   6.207 -    unsigned long flags, vmac_hash;
   6.208 -    unsigned char vmac_key[ETH_ALEN + 2 + MAX_DOMAIN_NAME];
   6.209 -
   6.210 -    if ( (p = find_domain_by_id(dom)) == NULL )
   6.211 -        return NULL;
   6.212 -    
   6.213 -    write_lock_irqsave(&tasklist_lock, flags);
   6.214 -
   6.215 -    for ( idx = 0; idx < MAX_DOMAIN_VIFS; idx++ )
   6.216 -        if ( p->net_vif_list[idx] == NULL )
   6.217 -            break;
   6.218 -    if ( idx == MAX_DOMAIN_VIFS )
   6.219 -        goto fail;
   6.220 -
   6.221 -    if ( (new_vif = kmem_cache_alloc(net_vif_cache, GFP_KERNEL)) == NULL )
   6.222 -        goto fail;
   6.223 -
   6.224 -    memset(new_vif, 0, sizeof(*new_vif));
   6.225 -    
   6.226 -    if ( sizeof(net_ring_t) > PAGE_SIZE )
   6.227 -        BUG();
   6.228 -    new_ring = (net_ring_t *)get_free_page(GFP_KERNEL);
   6.229 -    clear_page(new_ring);
   6.230 -    SHARE_PFN_WITH_DOMAIN(virt_to_page(new_ring), p);
   6.231 -
   6.232 -    /*
   6.233 -     * Fill in the new vif struct. Note that, while the vif's refcnt is
   6.234 -     * non-zero, we hold a reference to the task structure.
   6.235 -     */
   6.236 -    atomic_set(&new_vif->refcnt, 1);
   6.237 -    new_vif->shared_rings = new_ring;
   6.238 -    new_vif->shared_idxs  = &p->shared_info->net_idx[idx];
   6.239 -    new_vif->domain       = p;
   6.240 -    new_vif->idx          = idx;
   6.241 -    new_vif->list.next    = NULL;
   6.242 -    spin_lock_init(&new_vif->rx_lock);
   6.243 -    spin_lock_init(&new_vif->tx_lock);
   6.244 -
   6.245 -    new_vif->credit_bytes = new_vif->remaining_credit = ~0UL;
   6.246 -    new_vif->credit_usec  = 0UL;
   6.247 -    init_ac_timer(&new_vif->credit_timeout);
   6.248 -
   6.249 -    if ( (p->domain == 0) && (idx == 0) )
   6.250 -    {
   6.251 -        /*
   6.252 -         * DOM0/VIF0 gets the real physical MAC address, so that users can
   6.253 -         * easily get a Xen-based machine up and running by using an existing
   6.254 -         * DHCP entry.
   6.255 -         */
   6.256 -        memcpy(new_vif->vmac, the_dev->dev_addr, ETH_ALEN);
   6.257 -    }
   6.258 -    else
   6.259 -    {
   6.260 -        /*
   6.261 -         * Most VIFs get a random MAC address with a "special" vendor id.
   6.262 -         * We try to get MAC addresses to be unique across multiple servers
   6.263 -         * by including the physical MAC address in the hash. The hash also
   6.264 -         * includes the vif index and the domain's name.
   6.265 -         * 
   6.266 -         * NB. The vendor is currently an "obsolete" one that used to belong
   6.267 -         * to DEC (AA-00-00). Using it is probably a bit rude :-)
   6.268 -         * 
   6.269 -         * NB2. The first bit of the first random octet is set to zero for
   6.270 -         * all dynamic MAC addresses. This may allow us to manually specify
   6.271 -         * MAC addresses for some VIFs with no fear of clashes.
   6.272 -         */
   6.273 -        memcpy(&vmac_key[0], the_dev->dev_addr, ETH_ALEN);
   6.274 -        *(__u16 *)(&vmac_key[ETH_ALEN]) = htons(idx);
   6.275 -        strcpy(&vmac_key[ETH_ALEN+2], p->name);
   6.276 -        vmac_hash = hash(vmac_key, ETH_ALEN + 2 + strlen(p->name));
   6.277 -        memcpy(new_vif->vmac, "\xaa\x00\x00", 3);
   6.278 -        new_vif->vmac[3] = (vmac_hash >> 16) & 0xef; /* First bit is zero. */
   6.279 -        new_vif->vmac[4] = (vmac_hash >>  8) & 0xff;
   6.280 -        new_vif->vmac[5] = (vmac_hash >>  0) & 0xff;
   6.281 -    }
   6.282 -
   6.283 -    p->net_vif_list[idx] = new_vif;
   6.284 -    
   6.285 -    write_unlock_irqrestore(&tasklist_lock, flags);
   6.286 -    return new_vif;
   6.287 -    
   6.288 - fail:
   6.289 -    write_unlock_irqrestore(&tasklist_lock, flags);
   6.290 -    if ( new_vif != NULL )
   6.291 -        kmem_cache_free(net_vif_cache, new_vif);
   6.292 -    if ( p != NULL )
   6.293 -        put_task_struct(p);
   6.294 -    return NULL;
   6.295 -}
   6.296 -
   6.297  void netif_interface_init(void)
   6.298  {
   6.299 -    netif_cachep = kmem_cache_create("netif_cache", sizeof(netif_t), 
   6.300 -                                     0, 0, NULL, NULL);
   6.301      memset(netif_hash, 0, sizeof(netif_hash));
   6.302 +    if ( br_add_bridge("netif-backend") != 0 )
   6.303 +        BUG();
   6.304 +    bridge_dev = __dev_get_by_name("netif-be-bridge");
   6.305 +    (void)dev_open(bridge_dev);
   6.306  }
     7.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/main.c	Thu May 06 11:55:12 2004 +0000
     7.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/main.c	Thu May 06 11:56:17 2004 +0000
     7.3 @@ -11,7 +11,10 @@
     7.4   */
     7.5  
     7.6  #include "common.h"
     7.7 +#include <asm/hypervisor-ifs/dom_mem_ops.h>
     7.8  
     7.9 +static void net_tx_action(unsigned long unused);
    7.10 +static void tx_skb_release(struct sk_buff *skb);
    7.11  static void make_tx_response(netif_t *netif, 
    7.12                               u16      id,
    7.13                               s8       st);
    7.14 @@ -21,38 +24,125 @@ static void make_rx_response(netif_t    
    7.15                               netif_addr_t addr,
    7.16                               u16          size);
    7.17  
    7.18 +static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
    7.19 +
    7.20  /* Don't currently gate addition of an interface to the tx scheduling list. */
    7.21  #define tx_work_exists(_if) (1)
    7.22  
    7.23  #define MAX_PENDING_REQS 256
    7.24 -static struct vm_struct *mmap_vma;
    7.25 -#define MMAP_VADDR(_req) ((unsigned long)mmap_vma->addr + ((_req) * PAGE_SIZE))
    7.26 +unsigned long mmap_vstart;
    7.27 +#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
    7.28 +
    7.29 +#define PKT_PROT_LEN (ETH_HLEN + 20)
    7.30  
    7.31  /*static pending_req_t pending_reqs[MAX_PENDING_REQS];*/
    7.32 +static u16 pending_id[MAX_PENDING_REQS];
    7.33  static u16 pending_ring[MAX_PENDING_REQS];
    7.34  static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
    7.35 -/* NB. We use a different index type to differentiate from shared blk rings. */
    7.36  typedef unsigned int PEND_RING_IDX;
    7.37  #define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
    7.38  static PEND_RING_IDX pending_prod, pending_cons;
    7.39  #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
    7.40  
    7.41 +static struct list_head net_schedule_list;
    7.42 +static spinlock_t net_schedule_list_lock;
    7.43 +
    7.44 +#define MAX_MFN_ALLOC 64
    7.45 +static unsigned long mfn_list[MAX_MFN_ALLOC];
    7.46 +static unsigned int alloc_index = 0;
    7.47 +static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
    7.48 +static void __refresh_mfn_list(void)
    7.49 +{
    7.50 +    int ret;
    7.51 +    dom_mem_op_t op;
    7.52 +    op.op = MEMOP_RESERVATION_INCREASE;
    7.53 +    op.u.increase.size  = MAX_MFN_ALLOC;
    7.54 +    op.u.increase.pages = mfn_list;
    7.55 +    if ( (ret = HYPERVISOR_dom_mem_op(&op)) != MAX_MFN_ALLOC )
    7.56 +    {
    7.57 +        printk(KERN_WARNING "Unable to increase memory reservation (%d)\n",
    7.58 +               ret);
    7.59 +        BUG();
    7.60 +    }
    7.61 +    alloc_index = MAX_MFN_ALLOC;
    7.62 +}
    7.63 +static unsigned long get_new_mfn(void)
    7.64 +{
    7.65 +    unsigned long mfn, flags;
    7.66 +    spin_lock_irqsave(&mfn_lock, flags);
    7.67 +    if ( alloc_index == 0 )
    7.68 +        __refresh_mfn_list();
    7.69 +    mfn = mfn_list[--alloc_index];
    7.70 +    spin_unlock_irqrestore(&mfn_lock, flags);
    7.71 +    return mfn;
    7.72 +}
    7.73 +static void dealloc_mfn(unsigned long mfn)
    7.74 +{
    7.75 +    unsigned long flags;
    7.76 +    spin_lock_irqsave(&mfn_lock, flags);
    7.77 +    mfn_list[alloc_index++] = mfn;
    7.78 +    spin_unlock_irqrestore(&mfn_lock, flags);
    7.79 +}
    7.80 +
    7.81 +static inline void maybe_schedule_tx_action(void)
    7.82 +{
    7.83 +    smp_mb();
    7.84 +    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
    7.85 +         !list_empty(&net_schedule_list) )
    7.86 +        tasklet_schedule(&net_tx_tasklet);
    7.87 +}
    7.88 +
    7.89  /*
    7.90   * This is the primary RECEIVE function for a network interface.
    7.91   * Note that, from the p.o.v. of /this/ OS it looks like a transmit.
    7.92   */
    7.93 -static void netif_start_xmit(struct sk_buff *skb, struct net_device *dev)
    7.94 +int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
    7.95  {
    7.96      netif_t *netif = (netif_t *)dev->priv;
    7.97 -    s8 status = BLKIF_RSP_OKAY;
    7.98 -    u16 size;
    7.99 -    mmu_update_t mmu[4];
   7.100 +    s8 status = NETIF_RSP_OKAY;
   7.101 +    u16 size, id;
   7.102 +    mmu_update_t mmu[6];
   7.103 +    pgd_t *pgd; pmd_t *pmd; pte_t *pte;
   7.104 +    unsigned long vdata, new_mfn;
   7.105 +
   7.106 +    /* Drop the packet if the target domain has no receive buffers. */
   7.107 +    if ( (netif->rx_req_cons == netif->rx->req_prod) ||
   7.108 +         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
   7.109 +    {
   7.110 +        dev_kfree_skb(skb);
   7.111 +        return 0;
   7.112 +    }
   7.113  
   7.114 -    memcpy(skb->mac.ethernet->h_dest, netif->vmac, ETH_ALEN);
   7.115 -    if ( ntohs(skb->mac.ethernet->h_proto) == ETH_P_ARP )
   7.116 -        memcpy(skb->nh.raw + 18, netif->vmac, ETH_ALEN);
   7.117 +    id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_req_cons++)].req.id;
   7.118 + 
   7.119 +    /*
   7.120 +     * We do not copy the packet unless:
   7.121 +     *  1. It is fragmented; or
   7.122 +     *  2. It spans a page boundary; or
   7.123 +     *  3. We cannot be sure the whole data page is allocated.
   7.124 +     * The copying method is taken from skb_copy().
   7.125 +     */
   7.126 +    if ( (skb_shinfo(skb)->nr_frags != 0) ||
   7.127 +         (((unsigned long)skb->end ^ (unsigned long)skb->head) & PAGE_MASK) ||
   7.128 +         ((skb->end - skb->head) < (PAGE_SIZE/2)) )
   7.129 +    {
   7.130 +        struct sk_buff *nskb = dev_alloc_skb(PAGE_SIZE-1024);
   7.131 +        int hlen = skb->data - skb->head;
   7.132 +        skb_reserve(nskb, hlen);
   7.133 +        skb_put(nskb, skb->len);
   7.134 +        (void)skb_copy_bits(skb, -hlen, nskb->head, hlen + skb->len);
   7.135 +        dev_kfree_skb(skb);
   7.136 +        skb = nskb;
   7.137 +    }
   7.138  
   7.139 -    spin_lock(&netif->rx_lock);
   7.140 +    vdata = (unsigned long)skb->data;
   7.141 +    size  = skb->tail - skb->data;
   7.142 +
   7.143 +    new_mfn = get_new_mfn();
   7.144 +
   7.145 +    pgd = pgd_offset_k(   (vdata & PAGE_MASK));
   7.146 +    pmd = pmd_offset(pgd, (vdata & PAGE_MASK));
   7.147 +    pte = pte_offset(pmd, (vdata & PAGE_MASK));
   7.148  
   7.149      mmu[0].val  = (unsigned long)(netif->domid<<16) & ~0xFFFFUL;
   7.150      mmu[0].ptr  = (unsigned long)(netif->domid<< 0) & ~0xFFFFUL;
   7.151 @@ -63,49 +153,43 @@ static void netif_start_xmit(struct sk_b
   7.152      mmu[1].ptr |= MMU_EXTENDED_COMMAND;
   7.153      mmu[1].val |= MMUEXT_SET_SUBJECTDOM_H;
   7.154  
   7.155 -    mmu[2].ptr  = ptr | MMU_EXTENDED_COMMAND;
   7.156 +    mmu[2].ptr  = virt_to_machine(vdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
   7.157      mmu[2].val  = MMUEXT_REASSIGN_PAGE;
   7.158  
   7.159 -    mmu[3].ptr  = ppte;
   7.160 -    mmu[3].val  = newpage;
   7.161 +    mmu[3].ptr  = MMU_EXTENDED_COMMAND;
   7.162 +    mmu[3].val  = MMUEXT_RESET_SUBJECTDOM;
   7.163 +
   7.164 +    mmu[4].ptr  = virt_to_machine(pte);
   7.165 +    mmu[4].val  = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
   7.166  
   7.167 -    if ( unlikely(HYPERVISOR_mmu_update(mmu, 4) < 0) )
   7.168 +    mmu[5].ptr  = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   7.169 +    mmu[5].val  = __pa(vdata) >> PAGE_SHIFT;
   7.170 +
   7.171 +    if ( unlikely(HYPERVISOR_mmu_update(mmu, 6) < 0) )
   7.172      {
   7.173 -        status = BLKIF_RSP_ERROR;
   7.174 +        dealloc_mfn(new_mfn);
   7.175 +        status = NETIF_RSP_ERROR;
   7.176          goto out;
   7.177      }
   7.178  
   7.179 -    /* Record this so they can be billed. */
   7.180 -    netif->total_packets_received++;
   7.181 -    netif->total_bytes_received += size;
   7.182 +    phys_to_machine_mapping[__pa(vdata) >> PAGE_SHIFT] = new_mfn;
   7.183 +
   7.184 +    netif->stats.tx_bytes += size;
   7.185 +    netif->stats.tx_packets++;
   7.186  
   7.187   out:
   7.188 -    make_rx_response(netif, rx->id, status, addr, size);
   7.189 +    spin_lock(&netif->rx_lock);
   7.190 +    make_rx_response(netif, id, status, virt_to_machine(vdata), size);
   7.191      spin_unlock(&netif->rx_lock);    
   7.192      dev_kfree_skb(skb);
   7.193 +    return 0;
   7.194  }
   7.195  
   7.196 -
   7.197 -/*************************************************************
   7.198 - * NEW TRANSMIT SCHEDULER
   7.199 - * 
   7.200 - * NB. We ought also to only send a limited number of bytes to the NIC
   7.201 - * for transmission at any one time (to avoid head-of-line blocking).
   7.202 - * However, driver rings are small enough that they provide a reasonable
   7.203 - * limit.
   7.204 - * 
   7.205 - * eg. 3c905 has 16 descriptors == 8 packets, at 100Mbps
   7.206 - *     e1000 has 256 descriptors == 128 packets, at 1000Mbps
   7.207 - *     tg3 has 512 descriptors == 256 packets, at 1000Mbps
   7.208 - * 
   7.209 - * So, worst case is tg3 with 256 1500-bytes packets == 375kB.
   7.210 - * This would take 3ms, and represents our worst-case HoL blocking cost.
   7.211 - * 
   7.212 - * We think this is reasonable.
   7.213 - */
   7.214 -
   7.215 -struct list_head net_schedule_list;
   7.216 -spinlock_t net_schedule_list_lock;
   7.217 +struct net_device_stats *netif_be_get_stats(struct net_device *dev)
   7.218 +{
   7.219 +    netif_t *netif = dev->priv;
   7.220 +    return &netif->stats;
   7.221 +}
   7.222  
   7.223  static int __on_net_schedule_list(netif_t *netif)
   7.224  {
   7.225 @@ -128,7 +212,7 @@ static void add_to_net_schedule_list_tai
   7.226          return;
   7.227  
   7.228      spin_lock(&net_schedule_list_lock);
   7.229 -    if ( likely(!__on_net_schedule_list(netif)) )
   7.230 +    if ( !__on_net_schedule_list(netif) && (netif->status == CONNECTED) )
   7.231      {
   7.232          list_add_tail(&netif->list, &net_schedule_list);
   7.233          netif_get(netif);
   7.234 @@ -136,34 +220,12 @@ static void add_to_net_schedule_list_tai
   7.235      spin_unlock(&net_schedule_list_lock);
   7.236  }
   7.237  
   7.238 -
   7.239 -static void tx_skb_release(struct sk_buff *skb);
   7.240 -    
   7.241 -static inline int init_tx_header(netif_t *netif, u8 *data, 
   7.242 -                                 unsigned int len, struct net_device *dev)
   7.243 +void netif_deschedule(netif_t *netif)
   7.244  {
   7.245 -    int proto = ntohs(*(unsigned short *)(data + 12));
   7.246 -
   7.247 -    memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
   7.248 -        
   7.249 -    switch ( proto )
   7.250 -    {
   7.251 -    case ETH_P_ARP:
   7.252 -        if ( len < 42 ) break;
   7.253 -        memcpy(data + 22, dev->dev_addr, ETH_ALEN);
   7.254 -        break;
   7.255 -    case ETH_P_IP:
   7.256 -        break;
   7.257 -    default:
   7.258 -        /* Unsupported protocols are onyl allowed to/from NETIF0/0. */
   7.259 -        if ( (netif->domain->domain != 0) || (netif->idx != 0) )
   7.260 -            proto = 0;
   7.261 -        break;
   7.262 -    }
   7.263 -    return proto;
   7.264 +    remove_from_net_schedule_list(netif);
   7.265  }
   7.266  
   7.267 -
   7.268 +#if 0
   7.269  static void tx_credit_callback(unsigned long data)
   7.270  {
   7.271      netif_t *netif = (netif_t *)data;
   7.272 @@ -176,6 +238,7 @@ static void tx_credit_callback(unsigned 
   7.273          maybe_schedule_tx_action();
   7.274      }    
   7.275  }
   7.276 +#endif
   7.277  
   7.278  static void net_tx_action(unsigned long unused)
   7.279  {
   7.280 @@ -184,6 +247,7 @@ static void net_tx_action(unsigned long 
   7.281      netif_t *netif;
   7.282      netif_tx_request_t txreq;
   7.283      u16 pending_idx;
   7.284 +    NETIF_RING_IDX i;
   7.285      pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED);
   7.286  
   7.287      while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
   7.288 @@ -197,7 +261,7 @@ static void net_tx_action(unsigned long 
   7.289  
   7.290          /* Work to do? */
   7.291          i = netif->tx_req_cons;
   7.292 -        if ( (i == shared_idxs->tx_req_prod) && 
   7.293 +        if ( (i == netif->tx->req_prod) && 
   7.294               ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
   7.295          {
   7.296              netif_put(netif);
   7.297 @@ -246,7 +310,7 @@ static void net_tx_action(unsigned long 
   7.298          /* No crossing a page boundary as the payload mustn't fragment. */
   7.299          if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) ) 
   7.300          {
   7.301 -            DPRINTK("tx.addr: %lx, size: %u, end: %lu\n", 
   7.302 +            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
   7.303                      txreq.addr, txreq.size, 
   7.304                      (txreq.addr &~PAGE_MASK) + txreq.size);
   7.305              make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   7.306 @@ -262,42 +326,38 @@ static void net_tx_action(unsigned long 
   7.307                                       PAGE_SIZE, prot, netif->domid) != 0 )
   7.308          {
   7.309              DPRINTK("Bad page frame\n");
   7.310 -            make_tx_response(netif, tx.id, NETIF_RSP_ERROR);
   7.311 +            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   7.312              netif_put(netif);
   7.313              continue;
   7.314          }
   7.315 -            
   7.316 +        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
   7.317 +            txreq.addr >> PAGE_SHIFT;
   7.318 +
   7.319          if ( unlikely((skb = alloc_skb(PKT_PROT_LEN, GFP_ATOMIC)) == NULL) )
   7.320          {
   7.321 -            make_tx_response(netif, tx.id, BLKIF_RSP_ERROR);
   7.322 +            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   7.323              netif_put(netif);
   7.324              vmfree_area_pages(MMAP_VADDR(pending_idx), PAGE_SIZE);
   7.325              break;
   7.326          }
   7.327          
   7.328 -        __skb_put(PKT_PROT_LEN);
   7.329 -        memcpy(skb->data, src, PKT_PROT_LEN);
   7.330 -        protocol = __constant_htons(
   7.331 -            init_tx_header(netif, g_data, tx.size, the_dev));
   7.332 -        if ( protocol == 0 )
   7.333 -        {
   7.334 -            make_tx_response(netif, tx.id, NETIF_RSP_ERROR);
   7.335 -            netif_put(netif);
   7.336 -            dev_kfree_skb(skb);
   7.337 -            goto cleanup_and_continue;
   7.338 -        }
   7.339 +        __skb_put(skb, PKT_PROT_LEN);
   7.340 +        memcpy(skb->data, 
   7.341 +               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
   7.342 +               PKT_PROT_LEN);
   7.343  
   7.344          skb->dev        = netif->dev;
   7.345          skb->protocol   = eth_type_trans(skb, skb->dev);
   7.346          
   7.347          /* Append the packet payload as a fragment. */
   7.348          skb_shinfo(skb)->frags[0].page        = 
   7.349 -          &mem_map[txreq.addr >> PAGE_SHIFT];
   7.350 -        skb_shinfo(skb)->frags[0].size        = txreq.size - PKT_PROT_LEN;
   7.351 +            virt_to_page(MMAP_VADDR(pending_idx));
   7.352 +        skb_shinfo(skb)->frags[0].size        =
   7.353 +            txreq.size - PKT_PROT_LEN;
   7.354          skb_shinfo(skb)->frags[0].page_offset = 
   7.355              (txreq.addr + PKT_PROT_LEN) & ~PAGE_MASK;
   7.356          skb_shinfo(skb)->nr_frags = 1;
   7.357 -        skb->data_len  = tx->size - PKT_PROT_LEN;
   7.358 +        skb->data_len  = txreq.size - PKT_PROT_LEN;
   7.359          skb->len      += skb->data_len;
   7.360  
   7.361          /* Destructor information. */
   7.362 @@ -305,33 +365,22 @@ static void net_tx_action(unsigned long 
   7.363          skb_shinfo(skb)->frags[MAX_SKB_FRAGS-1].page = (struct page *)netif;
   7.364          skb_shinfo(skb)->frags[MAX_SKB_FRAGS-1].size = pending_idx;
   7.365  
   7.366 -        /* Record the transmission so they can be billed. */
   7.367 -        netif->total_packets_sent++;
   7.368 -        netif->total_bytes_sent += tx->size;
   7.369 +        netif->stats.rx_bytes += txreq.size;
   7.370 +        netif->stats.rx_packets++;
   7.371  
   7.372 +        pending_id[pending_idx] = txreq.id;
   7.373          pending_cons++;
   7.374 +
   7.375          netif_rx(skb);
   7.376          netif->dev->last_rx = jiffies;
   7.377      }
   7.378  }
   7.379  
   7.380 -DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
   7.381 -
   7.382 -
   7.383 -static inline void maybe_schedule_tx_action(void)
   7.384 -{
   7.385 -    smp_mb();
   7.386 -    if ( !netif_queue_stopped(the_dev) &&
   7.387 -         !list_empty(&net_schedule_list) )
   7.388 -        tasklet_schedule(&net_tx_tasklet);
   7.389 -}
   7.390 -
   7.391 -
   7.392  /* Destructor function for tx skbs. */
   7.393  static void tx_skb_release(struct sk_buff *skb)
   7.394  {
   7.395 -    int i;
   7.396 -    netif_t *netif = (netif_t)skb_shinfo(skb)->frags[MAX_SKB_FRAGS-1].page;
   7.397 +    unsigned long flags;
   7.398 +    netif_t *netif = (netif_t *)skb_shinfo(skb)->frags[MAX_SKB_FRAGS-1].page;
   7.399      u16 pending_idx = skb_shinfo(skb)->frags[MAX_SKB_FRAGS-1].size;
   7.400  
   7.401      vmfree_area_pages(MMAP_VADDR(pending_idx), PAGE_SIZE);
   7.402 @@ -339,25 +388,19 @@ static void tx_skb_release(struct sk_buf
   7.403      skb_shinfo(skb)->nr_frags = 0; 
   7.404      
   7.405      spin_lock(&netif->tx_lock);
   7.406 -    make_tx_response(netif, skb->guest_id, NETIF_RSP_OKAY);
   7.407 +    make_tx_response(netif, pending_id[pending_idx], NETIF_RSP_OKAY);
   7.408      spin_unlock(&netif->tx_lock);
   7.409      
   7.410 -    /*
   7.411 -     * Checks below must happen after the above response is posted. This avoids
   7.412 -     * a possible race with a guest OS on another CPU.
   7.413 -     */
   7.414 -    mb();
   7.415 -    
   7.416 -    if ( tx_work_exists(netif) )
   7.417 -    {
   7.418 -        add_to_net_schedule_list_tail(netif);
   7.419 -        maybe_schedule_tx_action();        
   7.420 -    }
   7.421 -    
   7.422      netif_put(netif);
   7.423 + 
   7.424 +    spin_lock_irqsave(&pend_prod_lock, flags);
   7.425 +    pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   7.426 +    spin_unlock_irqrestore(&pend_prod_lock, flags);
   7.427 + 
   7.428 +    maybe_schedule_tx_action();        
   7.429  }
   7.430  
   7.431 -
   7.432 +#if 0
   7.433  long flush_bufs_for_netif(netif_t *netif)
   7.434  {
   7.435      NET_RING_IDX i;
   7.436 @@ -395,6 +438,7 @@ long flush_bufs_for_netif(netif_t *netif
   7.437  
   7.438      return 0;
   7.439  }
   7.440 +#endif
   7.441  
   7.442  void netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
   7.443  {
   7.444 @@ -424,7 +468,6 @@ static void make_tx_response(netif_t *ne
   7.445          notify_via_evtchn(netif->evtchn);
   7.446  }
   7.447  
   7.448 -
   7.449  static void make_rx_response(netif_t     *netif, 
   7.450                               u16          id, 
   7.451                               s8           st,
   7.452 @@ -448,28 +491,18 @@ static void make_rx_response(netif_t    
   7.453          notify_via_evtchn(netif->evtchn);
   7.454  }
   7.455  
   7.456 -
   7.457  static int __init init_module(void)
   7.458  {
   7.459      netif_interface_init();
   7.460 -
   7.461 -    if ( (mmap_vma = get_vm_area(MAX_PENDING_REQS * PAGE_SIZE, 
   7.462 -                                 VM_IOREMAP)) == NULL )
   7.463 -    {
   7.464 -        printk(KERN_WARNING "Could not allocate VMA for netif backend.\n");
   7.465 -        return -ENOMEM;
   7.466 -    }
   7.467 -
   7.468 +    mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS);
   7.469      netif_ctrlif_init();
   7.470 -
   7.471      return 0;
   7.472  }
   7.473  
   7.474 -
   7.475  static void cleanup_module(void)
   7.476  {
   7.477 +    BUG();
   7.478  }
   7.479  
   7.480 -
   7.481  module_init(init_module);
   7.482  module_exit(cleanup_module);
     8.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c	Thu May 06 11:55:12 2004 +0000
     8.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c	Thu May 06 11:56:17 2004 +0000
     8.3 @@ -25,6 +25,10 @@
     8.4  #include <net/sock.h>
     8.5  #include <net/pkt_sched.h>
     8.6  
     8.7 +#include "../netif.h"
     8.8 +
     8.9 +static struct tq_struct netif_statechange_tq;
    8.10 +
    8.11  #define RX_BUF_SIZE ((PAGE_SIZE/2)+1) /* Fool the slab allocator :-) */
    8.12  
    8.13  static void network_interrupt(int irq, void *dev_id, struct pt_regs *ptregs);
    8.14 @@ -44,17 +48,21 @@ struct net_private
    8.15  
    8.16      struct net_device_stats stats;
    8.17      NET_RING_IDX rx_resp_cons, tx_resp_cons;
    8.18 -    unsigned int net_ring_fixmap_idx, tx_full;
    8.19 -    net_ring_t  *net_ring;
    8.20 -    net_idx_t   *net_idx;
    8.21 +    unsigned int tx_full;
    8.22 +    
    8.23 +    netif_tx_interface_t *tx;
    8.24 +    netif_rx_interface_t *rx;
    8.25 +
    8.26      spinlock_t   tx_lock;
    8.27 -    unsigned int idx; /* Domain-specific index of this VIF. */
    8.28  
    8.29 -    unsigned int rx_bufs_to_notify;
    8.30 +    unsigned int handle;
    8.31 +    unsigned int evtchn;
    8.32 +    unsigned int irq;
    8.33  
    8.34 -#define STATE_ACTIVE    0
    8.35 -#define STATE_SUSPENDED 1
    8.36 -#define STATE_CLOSED    2
    8.37 +#define NETIF_STATE_CLOSED       0
    8.38 +#define NETIF_STATE_DISCONNECTED 1
    8.39 +#define NETIF_STATE_CONNECTED    2
    8.40 +#define NETIF_STATE_ACTIVE       3
    8.41      unsigned int state;
    8.42  
    8.43      /*
    8.44 @@ -75,36 +83,17 @@ struct net_private
    8.45      (unsigned short)_id; })
    8.46  
    8.47  
    8.48 -static void _dbg_network_int(struct net_device *dev)
    8.49 -{
    8.50 -    struct net_private *np = dev->priv;
    8.51 -
    8.52 -    if ( np->state == STATE_CLOSED )
    8.53 -        return;
    8.54 -    
    8.55 -    printk(KERN_ALERT "net: tx_full=%d, tx_resp_cons=0x%08x,"
    8.56 -           " tx_req_prod=0x%08x\nnet: tx_resp_prod=0x%08x,"
    8.57 -           " tx_event=0x%08x, state=%d\n",
    8.58 -           np->tx_full, np->tx_resp_cons, 
    8.59 -           np->net_idx->tx_req_prod, np->net_idx->tx_resp_prod, 
    8.60 -           np->net_idx->tx_event,
    8.61 -           test_bit(__LINK_STATE_XOFF, &dev->state));
    8.62 -    printk(KERN_ALERT "net: rx_resp_cons=0x%08x,"
    8.63 -           " rx_req_prod=0x%08x\nnet: rx_resp_prod=0x%08x, rx_event=0x%08x\n",
    8.64 -           np->rx_resp_cons, np->net_idx->rx_req_prod,
    8.65 -           np->net_idx->rx_resp_prod, np->net_idx->rx_event);
    8.66 -}
    8.67 -
    8.68 -
    8.69 -static void dbg_network_int(int irq, void *unused, struct pt_regs *ptregs)
    8.70 +static struct net_device *find_dev_by_handle(unsigned int handle)
    8.71  {
    8.72      struct list_head *ent;
    8.73      struct net_private *np;
    8.74      list_for_each ( ent, &dev_list )
    8.75      {
    8.76          np = list_entry(ent, struct net_private, list);
    8.77 -        _dbg_network_int(np->dev);
    8.78 +        if ( np->handle == handle )
     8.79 +            return np->dev;
    8.80      }
    8.81 +    return NULL;
    8.82  }
    8.83  
    8.84  
    8.85 @@ -114,36 +103,12 @@ static int network_open(struct net_devic
    8.86      netop_t netop;
    8.87      int i, ret;
    8.88  
    8.89 -    netop.cmd = NETOP_RESET_RINGS;
    8.90 -    netop.vif = np->idx;
    8.91 -    if ( (ret = HYPERVISOR_net_io_op(&netop)) != 0 )
    8.92 -    {
    8.93 -        printk(KERN_ALERT "Possible net trouble: couldn't reset ring idxs\n");
    8.94 -        return ret;
    8.95 -    }
    8.96 +    if ( np->state != NETIF_STATE_CONNECTED )
    8.97 +        return -EINVAL;
    8.98  
    8.99 -    netop.cmd = NETOP_GET_VIF_INFO;
   8.100 -    netop.vif = np->idx;
   8.101 -    if ( (ret = HYPERVISOR_net_io_op(&netop)) != 0 )
   8.102 -    {
   8.103 -        printk(KERN_ALERT "Couldn't get info for vif %d\n", np->idx);
   8.104 -        return ret;
   8.105 -    }
   8.106 -
   8.107 -    memcpy(dev->dev_addr, netop.u.get_vif_info.vmac, ETH_ALEN);
   8.108 -
   8.109 -    set_fixmap(FIX_NETRING0_BASE + np->net_ring_fixmap_idx, 
   8.110 -               netop.u.get_vif_info.ring_mfn << PAGE_SHIFT);
   8.111 -    np->net_ring = (net_ring_t *)fix_to_virt(
   8.112 -        FIX_NETRING0_BASE + np->net_ring_fixmap_idx);
   8.113 -    np->net_idx  = &HYPERVISOR_shared_info->net_idx[np->idx];
   8.114 -
   8.115 -    np->rx_bufs_to_notify = 0;
   8.116      np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
   8.117      memset(&np->stats, 0, sizeof(np->stats));
   8.118      spin_lock_init(&np->tx_lock);
   8.119 -    memset(np->net_ring, 0, sizeof(*np->net_ring));
   8.120 -    memset(np->net_idx, 0, sizeof(*np->net_idx));
   8.121  
   8.122      /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
   8.123      for ( i = 0; i <= XENNET_TX_RING_SIZE; i++ )
   8.124 @@ -152,7 +117,7 @@ static int network_open(struct net_devic
   8.125          np->rx_skbs[i] = (void *)(i+1);
   8.126  
   8.127      wmb();
   8.128 -    np->state = STATE_ACTIVE;
   8.129 +    np->state = NETIF_STATE_ACTIVE;
   8.130  
   8.131      network_alloc_rx_buffers(dev);
   8.132  
   8.133 @@ -203,7 +168,7 @@ static void network_tx_buf_gc(struct net
   8.134           ((np->net_idx->tx_req_prod - prod) < XENNET_TX_RING_SIZE) )
   8.135      {
   8.136          np->tx_full = 0;
   8.137 -        if ( np->state == STATE_ACTIVE )
   8.138 +        if ( np->state == NETIF_STATE_ACTIVE )
   8.139              netif_wake_queue(dev);
   8.140      }
   8.141  }
   8.142 @@ -228,7 +193,7 @@ static void network_alloc_rx_buffers(str
   8.143      NET_RING_IDX i = np->net_idx->rx_req_prod;
   8.144  
   8.145      if ( unlikely((i - np->rx_resp_cons) == XENNET_RX_RING_SIZE) || 
   8.146 -         unlikely(np->state != STATE_ACTIVE) )
   8.147 +         unlikely(np->state != NETIF_STATE_ACTIVE) )
   8.148          return;
   8.149  
   8.150      do {
   8.151 @@ -341,17 +306,15 @@ static int network_start_xmit(struct sk_
   8.152  }
   8.153  
   8.154  
   8.155 -static inline void _network_interrupt(struct net_device *dev)
   8.156 +static void netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
   8.157  {
   8.158 +    struct net_device *dev = dev_id;
   8.159      struct net_private *np = dev->priv;
   8.160      unsigned long flags;
   8.161      struct sk_buff *skb;
   8.162      rx_resp_entry_t *rx;
   8.163      NET_RING_IDX i;
   8.164  
   8.165 -    if ( unlikely(np->state == STATE_CLOSED) )
   8.166 -        return;
   8.167 -    
   8.168      spin_lock_irqsave(&np->tx_lock, flags);
   8.169      network_tx_buf_gc(dev);
   8.170      spin_unlock_irqrestore(&np->tx_lock, flags);
   8.171 @@ -367,7 +330,7 @@ static inline void _network_interrupt(st
   8.172          if ( unlikely(rx->status != RING_STATUS_OK) )
   8.173          {
   8.174              /* Gate this error. We get a (valid) slew of them on suspend. */
   8.175 -            if ( np->state == STATE_ACTIVE )
   8.176 +            if ( np->state == NETIF_STATE_ACTIVE )
   8.177                  printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
   8.178              dev_kfree_skb_any(skb);
   8.179              continue;
   8.180 @@ -407,26 +370,11 @@ static inline void _network_interrupt(st
   8.181  }
   8.182  
   8.183  
   8.184 -static void network_interrupt(int irq, void *unused, struct pt_regs *ptregs)
   8.185 -{
   8.186 -    struct list_head *ent;
   8.187 -    struct net_private *np;
   8.188 -    list_for_each ( ent, &dev_list )
   8.189 -    {
   8.190 -        np = list_entry(ent, struct net_private, list);
   8.191 -        _network_interrupt(np->dev);
   8.192 -    }
   8.193 -}
   8.194 -
   8.195 -
   8.196  static int network_close(struct net_device *dev)
   8.197  {
   8.198      struct net_private *np = dev->priv;
   8.199      netop_t netop;
   8.200  
   8.201 -    np->state = STATE_SUSPENDED;
   8.202 -    wmb();
   8.203 -
   8.204      netif_stop_queue(np->dev);
   8.205  
   8.206      netop.cmd = NETOP_FLUSH_BUFFERS;
   8.207 @@ -442,12 +390,9 @@ static int network_close(struct net_devi
   8.208      }
   8.209  
   8.210      wmb();
   8.211 -    np->state = STATE_CLOSED;
   8.212 +    np->state = NETIF_STATE_CONNECTED;
   8.213      wmb();
   8.214  
   8.215 -    /* Now no longer safe to take interrupts for this device. */
   8.216 -    clear_fixmap(FIX_NETRING0_BASE + np->net_ring_fixmap_idx);
   8.217 -
   8.218      MOD_DEC_USE_COUNT;
   8.219  
   8.220      return 0;
   8.221 @@ -461,72 +406,181 @@ static struct net_device_stats *network_
   8.222  }
   8.223  
   8.224  
   8.225 +static void netif_bringup_phase1(void *unused)
   8.226 +{
   8.227 +    ctrl_msg_t                   cmsg;
   8.228 +    netif_fe_interface_connect_t up;
   8.229 +    struct net_device *dev;
   8.230 +    struct net_private *np;
   8.231 +
   8.232 +    dev = find_dev_by_handle(0);
   8.233 +    np  = dev->priv;
   8.234 +    
   8.235 +    /* Move from CLOSED to DISCONNECTED state. */
   8.236 +    np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
   8.237 +    np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
   8.238 +    memset(np->tx, 0, PAGE_SIZE);
   8.239 +    memset(np->rx, 0, PAGE_SIZE);
   8.240 +    np->state  = NETIF_STATE_DISCONNECTED;
   8.241 +
   8.242 +    /* Construct an interface-CONNECT message for the domain controller. */
   8.243 +    cmsg.type      = CMSG_NETIF_FE;
   8.244 +    cmsg.subtype   = CMSG_NETIF_FE_INTERFACE_CONNECT;
   8.245 +    cmsg.length    = sizeof(netif_fe_interface_connect_t);
   8.246 +    up.handle      = 0;
   8.247 +    up.tx_shmem_frame = virt_to_machine(np->tx) >> PAGE_SHIFT;
   8.248 +    up.rx_shmem_frame = virt_to_machine(np->rx) >> PAGE_SHIFT;
   8.249 +    memcpy(cmsg.msg, &up, sizeof(up));
   8.250 +
   8.251 +    /* Tell the controller to bring up the interface. */
   8.252 +    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
   8.253 +}
   8.254 +
   8.255 +static void netif_bringup_phase2(void *unused)
   8.256 +{
   8.257 +    struct net_device *dev;
   8.258 +    struct net_private *np;
   8.259 +
   8.260 +    dev = find_dev_by_handle(0);
   8.261 +    np  = dev->priv;
   8.262 +    
   8.263 +    np->irq = bind_evtchn_to_irq(np->evtchn);
   8.264 +    (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, 
   8.265 +                      "netif", dev);
   8.266 +
   8.267 +    np->state = NETIF_STATE_CONNECTED;
   8.268 +}
   8.269 +
   8.270 +static void netif_status_change(netif_fe_interface_status_changed_t *status)
   8.271 +{
   8.272 +    struct net_device *dev;
   8.273 +    struct net_private *np;
   8.274 +    
   8.275 +    if ( status->handle != 0 )
   8.276 +    {
   8.277 +        printk(KERN_WARNING "Status change on unsupported netif %d\n",
   8.278 +               status->handle);
   8.279 +        return;
   8.280 +    }
   8.281 +
   8.282 +    dev = find_dev_by_handle(0);
   8.283 +    np  = dev->priv;
   8.284 +    
   8.285 +    switch ( status->status )
   8.286 +    {
   8.287 +    case NETIF_INTERFACE_STATUS_DESTROYED:
   8.288 +        printk(KERN_WARNING "Unexpected netif-DESTROYED message in state %d\n",
    8.289 +               np->state);
   8.290 +        break;
   8.291 +
   8.292 +    case NETIF_INTERFACE_STATUS_DISCONNECTED:
   8.293 +        if ( np->state != NETIF_STATE_CLOSED )
   8.294 +        {
   8.295 +            printk(KERN_WARNING "Unexpected netif-DISCONNECTED message"
    8.296 +                   " in state %d\n", np->state);
   8.297 +            break;
   8.298 +        }
   8.299 +        netif_statechange_tq.routine = netif_bringup_phase1;
   8.300 +        schedule_task(&netif_statechange_tq);
   8.301 +        break;
   8.302 +
   8.303 +    case NETIF_INTERFACE_STATUS_CONNECTED:
   8.304 +        if ( np->state == NETIF_STATE_CLOSED )
   8.305 +        {
   8.306 +            printk(KERN_WARNING "Unexpected netif-CONNECTED message"
    8.307 +                   " in state %d\n", np->state);
   8.308 +            break;
   8.309 +        }
   8.310 +        np->evtchn = status->evtchn;
   8.311 +        memcpy(dev->dev_addr, status->mac, ETH_ALEN);
   8.312 +        netif_statechange_tq.routine = netif_bringup_phase2;
   8.313 +        schedule_task(&netif_statechange_tq);
   8.314 +        break;
   8.315 +
   8.316 +    default:
   8.317 +        printk(KERN_WARNING "Status change to unknown value %d\n", 
   8.318 +               status->status);
   8.319 +        break;
   8.320 +    }
   8.321 +}
   8.322 +
   8.323 +
   8.324 +static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
   8.325 +{
   8.326 +    switch ( msg->subtype )
   8.327 +    {
   8.328 +    case CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED:
   8.329 +        if ( msg->length != sizeof(netif_fe_interface_status_changed_t) )
   8.330 +            goto parse_error;
   8.331 +        netif_status_change((netif_fe_interface_status_changed_t *)
   8.332 +                            &msg->msg[0]);
   8.333 +        break;
   8.334 +    default:
   8.335 +        goto parse_error;
   8.336 +    }
   8.337 +
   8.338 +    ctrl_if_send_response(msg);
   8.339 +    return;
   8.340 +
   8.341 + parse_error:
   8.342 +    msg->length = 0;
   8.343 +    ctrl_if_send_response(msg);
   8.344 +}
   8.345 +
   8.346 +
   8.347  static int __init init_module(void)
   8.348  {
   8.349 -#if 0
   8.350 -    int i, fixmap_idx=-1, err;
   8.351 +    ctrl_msg_t                       cmsg;
   8.352 +    netif_fe_driver_status_changed_t st;
   8.353 +    int i, err;
   8.354      struct net_device *dev;
   8.355      struct net_private *np;
   8.356 -    netop_t netop;
   8.357  
   8.358      INIT_LIST_HEAD(&dev_list);
   8.359  
   8.360 -    network_irq = bind_virq_to_irq(VIRQ_NET);
   8.361 -    debug_irq   = bind_virq_to_irq(VIRQ_DEBUG);
   8.362 +    if ( (dev = alloc_etherdev(sizeof(struct net_private))) == NULL )
   8.363 +    {
   8.364 +        err = -ENOMEM;
   8.365 +        goto fail;
   8.366 +    }
   8.367  
   8.368 -    err = request_irq(network_irq, network_interrupt, 
   8.369 -                      SA_SAMPLE_RANDOM, "network", NULL);
   8.370 -    if ( err )
   8.371 +    np = dev->priv;
   8.372 +    np->state  = NETIF_STATE_CLOSED;
   8.373 +    np->handle = 0;
   8.374 +
   8.375 +    dev->open            = network_open;
   8.376 +    dev->hard_start_xmit = network_start_xmit;
   8.377 +    dev->stop            = network_close;
   8.378 +    dev->get_stats       = network_get_stats;
   8.379 +    
   8.380 +    if ( (err = register_netdev(dev)) != 0 )
   8.381      {
   8.382 -        printk(KERN_WARNING "Could not allocate network interrupt\n");
   8.383 +        kfree(dev);
   8.384          goto fail;
   8.385      }
   8.386      
   8.387 -    err = request_irq(debug_irq, dbg_network_int, 
   8.388 -                      SA_SHIRQ, "net_dbg", &dbg_network_int);
   8.389 -    if ( err )
   8.390 -        printk(KERN_WARNING "Non-fatal error -- no debug interrupt\n");
   8.391 +    np->dev = dev;
   8.392 +    list_add(&np->list, &dev_list);
   8.393  
   8.394 -    for ( i = 0; i < MAX_DOMAIN_VIFS; i++ )
   8.395 -    {
   8.396 -        /* If the VIF is invalid then the query hypercall will fail. */
   8.397 -        netop.cmd = NETOP_GET_VIF_INFO;
   8.398 -        netop.vif = i;
   8.399 -        if ( HYPERVISOR_net_io_op(&netop) != 0 )
   8.400 -            continue;
   8.401 -
   8.402 -        /* We actually only support up to 4 vifs right now. */
   8.403 -        if ( ++fixmap_idx == 4 )
   8.404 -            break;
   8.405 +    (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
   8.406  
   8.407 -        dev = alloc_etherdev(sizeof(struct net_private));
   8.408 -        if ( dev == NULL )
   8.409 -        {
   8.410 -            err = -ENOMEM;
   8.411 -            goto fail;
   8.412 -        }
   8.413 -
   8.414 -        np = dev->priv;
   8.415 -        np->state               = STATE_CLOSED;
   8.416 -        np->net_ring_fixmap_idx = fixmap_idx;
   8.417 -        np->idx                 = i;
   8.418 +    /* Send a driver-UP notification to the domain controller. */
   8.419 +    cmsg.type      = CMSG_NETIF_FE;
   8.420 +    cmsg.subtype   = CMSG_NETIF_FE_DRIVER_STATUS_CHANGED;
   8.421 +    cmsg.length    = sizeof(netif_fe_driver_status_changed_t);
   8.422 +    st.status      = NETIF_DRIVER_STATUS_UP;
   8.423 +    memcpy(cmsg.msg, &st, sizeof(st));
   8.424 +    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
   8.425  
   8.426 -        SET_MODULE_OWNER(dev);
   8.427 -        dev->open            = network_open;
   8.428 -        dev->hard_start_xmit = network_start_xmit;
   8.429 -        dev->stop            = network_close;
   8.430 -        dev->get_stats       = network_get_stats;
   8.431 -
   8.432 -        memcpy(dev->dev_addr, netop.u.get_vif_info.vmac, ETH_ALEN);
   8.433 -
   8.434 -        if ( (err = register_netdev(dev)) != 0 )
   8.435 -        {
   8.436 -            kfree(dev);
   8.437 -            goto fail;
   8.438 -        }
   8.439 -
   8.440 -        np->dev = dev;
   8.441 -        list_add(&np->list, &dev_list);
   8.442 +    /*
   8.443 +     * We should read 'nr_interfaces' from response message and wait
   8.444 +     * for notifications before proceeding. For now we assume that we
   8.445 +     * will be notified of exactly one interface.
   8.446 +     */
   8.447 +    while ( np->state != NETIF_STATE_CONNECTED )
   8.448 +    {
   8.449 +        set_current_state(TASK_INTERRUPTIBLE);
   8.450 +        schedule_timeout(1);
   8.451      }
   8.452  
   8.453      return 0;
   8.454 @@ -534,30 +588,13 @@ static int __init init_module(void)
   8.455   fail:
   8.456      cleanup_module();
   8.457      return err;
   8.458 -#endif
   8.459 -    return 0;
   8.460  }
   8.461  
   8.462  
   8.463  static void cleanup_module(void)
   8.464  {
   8.465 -    struct net_private *np;
   8.466 -    struct net_device *dev;
   8.467 -
   8.468 -    while ( !list_empty(&dev_list) )
   8.469 -    {
   8.470 -        np = list_entry(dev_list.next, struct net_private, list);
   8.471 -        list_del(&np->list);
   8.472 -        dev = np->dev;
   8.473 -        unregister_netdev(dev);
   8.474 -        kfree(dev);
   8.475 -    }
   8.476 -
   8.477 -    free_irq(network_irq, NULL);
   8.478 -    free_irq(debug_irq, NULL);
   8.479 -
   8.480 -    unbind_virq_from_irq(VIRQ_NET);
   8.481 -    unbind_virq_from_irq(VIRQ_DEBUG);
   8.482 +    /* XXX FIXME */
   8.483 +    BUG();
   8.484  }
   8.485  
   8.486  
     9.1 --- a/xenolinux-2.4.26-sparse/arch/xen/mm/hypervisor.c	Thu May 06 11:55:12 2004 +0000
     9.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/mm/hypervisor.c	Thu May 06 11:56:17 2004 +0000
     9.3 @@ -8,7 +8,10 @@
     9.4  
     9.5  #include <linux/config.h>
     9.6  #include <linux/sched.h>
     9.7 +#include <linux/mm.h>
     9.8 +#include <linux/vmalloc.h>
     9.9  #include <asm/hypervisor.h>
    9.10 +#include <asm/hypervisor-ifs/dom_mem_ops.h>
    9.11  #include <asm/page.h>
    9.12  #include <asm/pgtable.h>
    9.13  #include <asm/multicall.h>
    9.14 @@ -242,3 +245,105 @@ void queue_set_ldt(unsigned long ptr, un
    9.15      increment_index();
    9.16      spin_unlock_irqrestore(&update_lock, flags);
    9.17  }
    9.18 +
    9.19 +void queue_machphys_update(unsigned long mfn, unsigned long pfn)
    9.20 +{
    9.21 +    unsigned long flags;
    9.22 +    spin_lock_irqsave(&update_lock, flags);
    9.23 +    update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
    9.24 +    update_queue[idx].val = pfn;
    9.25 +    increment_index();
    9.26 +    spin_unlock_irqrestore(&update_lock, flags);
    9.27 +}
    9.28 +
    9.29 +#ifdef CONFIG_XEN_PHYSDEV_ACCESS
    9.30 +
    9.31 +unsigned long allocate_empty_lowmem_region(unsigned long pages)
    9.32 +{
    9.33 +    pgd_t         *pgd; 
    9.34 +    pmd_t         *pmd;
    9.35 +    pte_t         *pte;
    9.36 +    unsigned long *pfn_array;
    9.37 +    unsigned long  vstart;
    9.38 +    unsigned long  i;
    9.39 +    int            ret;
    9.40 +    unsigned int   order = get_order(pages*PAGE_SIZE);
    9.41 +    dom_mem_op_t   dom_mem_op;
    9.42 +
    9.43 +    vstart = __get_free_pages(GFP_KERNEL, order);
    9.44 +    if ( vstart == 0 )
    9.45 +        return 0UL;
    9.46 +
    9.47 +    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
    9.48 +    if ( pfn_array == NULL )
    9.49 +        BUG();
    9.50 +
    9.51 +    for ( i = 0; i < (1<<order); i++ )
    9.52 +    {
    9.53 +        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
    9.54 +        pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
    9.55 +        pte = pte_offset(pmd, (vstart + (i*PAGE_SIZE))); 
    9.56 +        pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
    9.57 +        queue_l1_entry_update(pte, 0);
    9.58 +        phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = 0xdeadbeef;
    9.59 +    }
    9.60 +
    9.61 +    flush_page_update_queue();
    9.62 +
    9.63 +    dom_mem_op.op = MEMOP_RESERVATION_DECREASE;
    9.64 +    dom_mem_op.u.decrease.size  = 1<<order;
    9.65 +    dom_mem_op.u.decrease.pages = pfn_array;
    9.66 +    if ( (ret = HYPERVISOR_dom_mem_op(&dom_mem_op)) != (1<<order) )
    9.67 +    {
    9.68 +        printk(KERN_WARNING "Unable to reduce memory reservation (%d)\n", ret);
    9.69 +        BUG();
    9.70 +    }
    9.71 +
    9.72 +    vfree(pfn_array);
    9.73 +
    9.74 +    return vstart;
    9.75 +}
    9.76 +
    9.77 +void deallocate_lowmem_region(unsigned long vstart, unsigned long pages)
    9.78 +{
    9.79 +    pgd_t         *pgd; 
    9.80 +    pmd_t         *pmd;
    9.81 +    pte_t         *pte;
    9.82 +    unsigned long *pfn_array;
    9.83 +    unsigned long  i;
    9.84 +    int            ret;
    9.85 +    unsigned int   order = get_order(pages*PAGE_SIZE);
    9.86 +    dom_mem_op_t   dom_mem_op;
    9.87 +
    9.88 +    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
    9.89 +    if ( pfn_array == NULL )
    9.90 +        BUG();
    9.91 +
    9.92 +    dom_mem_op.op = MEMOP_RESERVATION_INCREASE;
    9.93 +    dom_mem_op.u.increase.size  = 1<<order;
    9.94 +    dom_mem_op.u.increase.pages = pfn_array;
    9.95 +    if ( (ret = HYPERVISOR_dom_mem_op(&dom_mem_op)) != (1<<order) )
    9.96 +    {
    9.97 +        printk(KERN_WARNING "Unable to increase memory reservation (%d)\n",
    9.98 +               ret);
    9.99 +        BUG();
   9.100 +    }
   9.101 +
   9.102 +    for ( i = 0; i < (1<<order); i++ )
   9.103 +    {
   9.104 +        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
   9.105 +        pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
   9.106 +        pte = pte_offset(pmd, (vstart + (i*PAGE_SIZE)));
   9.107 +        queue_l1_entry_update(pte, (pfn_array[i]<<PAGE_SHIFT)|__PAGE_KERNEL);
   9.108 +        queue_machphys_update(pfn_array[i], __pa(vstart)>>PAGE_SHIFT);
   9.109 +        phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = pfn_array[i];
   9.110 +    }
   9.111 +
   9.112 +    flush_page_update_queue();
   9.113 +
   9.114 +    vfree(pfn_array);
   9.115 +
   9.116 +    free_pages(vstart, order);
   9.117 +}
   9.118 +
   9.119 +#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
    10.1 --- a/xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h	Thu May 06 11:55:12 2004 +0000
    10.2 +++ b/xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h	Thu May 06 11:56:17 2004 +0000
    10.3 @@ -44,6 +44,7 @@ void queue_pgd_unpin(unsigned long ptr);
    10.4  void queue_pte_pin(unsigned long ptr);
    10.5  void queue_pte_unpin(unsigned long ptr);
    10.6  void queue_set_ldt(unsigned long ptr, unsigned long bytes);
    10.7 +void queue_machphys_update(unsigned long mfn, unsigned long pfn);
    10.8  #define MMU_UPDATE_DEBUG 0
    10.9  
   10.10  #if MMU_UPDATE_DEBUG > 0
   10.11 @@ -137,6 +138,12 @@ static inline int flush_page_update_queu
   10.12  #define XEN_flush_page_update_queue() (_flush_page_update_queue())
   10.13  void MULTICALL_flush_page_update_queue(void);
   10.14  
   10.15 +#ifdef CONFIG_XEN_PHYSDEV_ACCESS
   10.16 +/* Allocate a contiguous empty region of low memory. Return virtual start. */
   10.17 +unsigned long allocate_empty_lowmem_region(unsigned long pages);
   10.18 +/* Deallocate a contiguous region of low memory. Return it to the allocator. */
   10.19 +void deallocate_lowmem_region(unsigned long vstart, unsigned long pages);
   10.20 +#endif
   10.21  
   10.22  /*
   10.23   * Assembler stubs for hyper-calls.