ia64/xen-unstable

changeset 2012:cabecfa924ab

bitkeeper revision 1.1108.33.20 (410bc51bu4HGQYf4dl0npPACAL5Irw)

share backend network driver between Linux 2.4 and 2.6
author cl349@freefall.cl.cam.ac.uk
date Sat Jul 31 16:13:15 2004 +0000 (2004-07-31)
parents 15b95c4931e0
children 140b3d8d1286 eca179b92d4d 767b46546305
files .rootkeys
      linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/common.h
      linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/control.c
      linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/interface.c
      linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/main.c
      linux-2.4.26-xen-sparse/mkbuildtree
      linux-2.6.7-xen-sparse/drivers/xen/netback/common.h
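
In outline: the four Linux 2.4 copies of the netif backend (common.h, control.c, interface.c, main.c) are deleted together with their .rootkeys entries; mkbuildtree now symlinks the shared sources from linux-2.6.7-xen-sparse/drivers/xen/netback/ into the 2.4 tree, with netback.c appearing as main.c; and the shared common.h gains a LINUX_VERSION_CODE guard so the same sources build against both kernels.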
line diff
     1.1 --- a/.rootkeys	Fri Jul 30 19:57:28 2004 +0000
     1.2 +++ b/.rootkeys	Sat Jul 31 16:13:15 2004 +0000
     1.3 @@ -67,10 +67,6 @@ 40420a6ebRqDjufoN1WSJvolEW2Wjw linux-2.4
     1.4  40420a73Wou6JlsZDiu6YwjYomsm7A linux-2.4.26-xen-sparse/arch/xen/drivers/evtchn/evtchn.c
     1.5  4083dc16-Kd5y9psK_yk161sme5j5Q linux-2.4.26-xen-sparse/arch/xen/drivers/netif/Makefile
     1.6  4083dc16UmHXxS9g_UFVnkUpN-oP2Q linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/Makefile
     1.7 -4097ba83pPKYqMS3Gl-PVKIgYU1FZw linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/common.h
     1.8 -4097ba83glWYwQTkbPqgLIlYDOPVLg linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/control.c
     1.9 -4097ba837h2tuiweIWp-voNVzCRI6g linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/interface.c
    1.10 -4087cf0d5dudKw_DecIJgOhLlBF_0Q linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/main.c
    1.11  405853f2wg7JXZJNltspMwOZJklxgw linux-2.4.26-xen-sparse/arch/xen/drivers/netif/frontend/Makefile
    1.12  4097ba83Qy2eafeFUhGhm6_4iMIIDw linux-2.4.26-xen-sparse/arch/xen/drivers/netif/netif.h
    1.13  3e5a4e65lWzkiPXsZdzPt2RNnJGG1g linux-2.4.26-xen-sparse/arch/xen/kernel/Makefile
     2.1 --- a/linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/common.h	Fri Jul 30 19:57:28 2004 +0000
     2.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.3 @@ -1,95 +0,0 @@
     2.4 -/******************************************************************************
     2.5 - * arch/xen/drivers/netif/backend/common.h
     2.6 - */
     2.7 -
     2.8 -#ifndef __NETIF__BACKEND__COMMON_H__
     2.9 -#define __NETIF__BACKEND__COMMON_H__
    2.10 -
    2.11 -#include <linux/config.h>
    2.12 -#include <linux/module.h>
    2.13 -#include <linux/interrupt.h>
    2.14 -#include <linux/slab.h>
    2.15 -#include <linux/ip.h>
    2.16 -#include <linux/in.h>
    2.17 -#include <linux/netdevice.h>
    2.18 -#include <linux/etherdevice.h>
    2.19 -#include <asm/ctrl_if.h>
    2.20 -#include <asm/io.h>
    2.21 -#include "../netif.h"
    2.22 -
    2.23 -#if 0
    2.24 -#define ASSERT(_p) \
    2.25 -    if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
    2.26 -    __LINE__, __FILE__); *(int*)0=0; }
    2.27 -#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
    2.28 -                           __FILE__ , __LINE__ , ## _a )
    2.29 -#else
    2.30 -#define ASSERT(_p) ((void)0)
    2.31 -#define DPRINTK(_f, _a...) ((void)0)
    2.32 -#endif
    2.33 -
    2.34 -typedef struct netif_st {
    2.35 -    /* Unique identifier for this interface. */
    2.36 -    domid_t          domid;
    2.37 -    unsigned int     handle;
    2.38 -
    2.39 -    /* Physical parameters of the comms window. */
    2.40 -    unsigned long    tx_shmem_frame;
    2.41 -    unsigned long    rx_shmem_frame;
    2.42 -    unsigned int     evtchn;
    2.43 -    int              irq;
    2.44 -
    2.45 -    /* The shared rings and indexes. */
    2.46 -    netif_tx_interface_t *tx;
    2.47 -    netif_rx_interface_t *rx;
    2.48 -
    2.49 -    /* Private indexes into shared ring. */
    2.50 -    NETIF_RING_IDX rx_req_cons;
    2.51 -    NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
    2.52 -    NETIF_RING_IDX tx_req_cons;
    2.53 -    NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
    2.54 -
    2.55 -    /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
    2.56 -    unsigned long   credit_bytes;
    2.57 -    unsigned long   credit_usec;
    2.58 -    unsigned long   remaining_credit;
    2.59 -    struct timer_list credit_timeout;
    2.60 -
    2.61 -    /* Miscellaneous private stuff. */
    2.62 -    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
    2.63 -    /*
    2.64 -     * DISCONNECT response is deferred until pending requests are ack'ed.
    2.65 -     * We therefore need to store the id from the original request.
    2.66 -     */
    2.67 -    u8               disconnect_rspid;
    2.68 -    struct netif_st *hash_next;
    2.69 -    struct list_head list;  /* scheduling list */
    2.70 -    atomic_t         refcnt;
    2.71 -    spinlock_t       rx_lock, tx_lock;
    2.72 -    struct net_device *dev;
    2.73 -    struct net_device_stats stats;
    2.74 -} netif_t;
    2.75 -
    2.76 -void netif_create(netif_be_create_t *create);
    2.77 -void netif_destroy(netif_be_destroy_t *destroy);
    2.78 -void netif_connect(netif_be_connect_t *connect);
    2.79 -int  netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id);
    2.80 -void __netif_disconnect_complete(netif_t *netif);
    2.81 -netif_t *netif_find_by_handle(domid_t domid, unsigned int handle);
    2.82 -#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
    2.83 -#define netif_put(_b)                             \
    2.84 -    do {                                          \
    2.85 -        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
    2.86 -            __netif_disconnect_complete(_b);      \
    2.87 -    } while (0)
    2.88 -
    2.89 -void netif_interface_init(void);
    2.90 -void netif_ctrlif_init(void);
    2.91 -
    2.92 -void netif_deschedule(netif_t *netif);
    2.93 -
    2.94 -int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
    2.95 -struct net_device_stats *netif_be_get_stats(struct net_device *dev);
    2.96 -void netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
    2.97 -
    2.98 -#endif /* __NETIF__BACKEND__COMMON_H__ */
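
The netif_get()/netif_put() macros above carry the deferred-disconnect scheme described in the struct comment: the DISCONNECT response is only sent once the last reference goes away. A minimal single-threaded userspace sketch of the idiom (a plain int stands in for the kernel's atomic_t; the names mirror common.h):

    #include <stdio.h>

    typedef struct netif_st {
        int refcnt;
        int status; /* 0 = CONNECTED, 1 = DISCONNECTING, 2 = DISCONNECTED */
    } netif_t;

    static void __netif_disconnect_complete(netif_t *netif)
    {
        netif->status = 2; /* DISCONNECTED: now safe to send the response */
        printf("deferred DISCONNECT response sent\n");
    }

    #define netif_get(_b) ((_b)->refcnt++)
    #define netif_put(_b)                        \
        do {                                     \
            if ( --(_b)->refcnt == 0 )           \
                __netif_disconnect_complete(_b); \
        } while (0)

    int main(void)
    {
        netif_t nif = { 0, 0 };

        netif_get(&nif);   /* reference taken at connect time */
        netif_get(&nif);   /* e.g. a pending request holds a reference */

        nif.status = 1;    /* DISCONNECTING */
        netif_put(&nif);   /* drop the connect-time reference */
        netif_put(&nif);   /* last request completes -> completion fires */
        return 0;
    }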
     3.1 --- a/linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/control.c	Fri Jul 30 19:57:28 2004 +0000
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,65 +0,0 @@
     3.4 -/******************************************************************************
     3.5 - * arch/xen/drivers/netif/backend/control.c
     3.6 - * 
     3.7 - * Routines for interfacing with the control plane.
     3.8 - * 
     3.9 - * Copyright (c) 2004, Keir Fraser
    3.10 - */
    3.11 -
    3.12 -#include "common.h"
    3.13 -
    3.14 -static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
    3.15 -{
    3.16 -    switch ( msg->subtype )
    3.17 -    {
    3.18 -    case CMSG_NETIF_BE_CREATE:
    3.19 -        if ( msg->length != sizeof(netif_be_create_t) )
    3.20 -            goto parse_error;
    3.21 -        netif_create((netif_be_create_t *)&msg->msg[0]);
    3.22 -        break;        
    3.23 -    case CMSG_NETIF_BE_DESTROY:
    3.24 -        if ( msg->length != sizeof(netif_be_destroy_t) )
    3.25 -            goto parse_error;
    3.26 -        netif_destroy((netif_be_destroy_t *)&msg->msg[0]);
    3.27 -        break;        
    3.28 -    case CMSG_NETIF_BE_CONNECT:
    3.29 -        if ( msg->length != sizeof(netif_be_connect_t) )
    3.30 -            goto parse_error;
    3.31 -        netif_connect((netif_be_connect_t *)&msg->msg[0]);
    3.32 -        break;        
    3.33 -    case CMSG_NETIF_BE_DISCONNECT:
    3.34 -        if ( msg->length != sizeof(netif_be_disconnect_t) )
    3.35 -            goto parse_error;
    3.36 -        if ( !netif_disconnect((netif_be_disconnect_t *)&msg->msg[0],msg->id) )
    3.37 -            return; /* Sending the response is deferred until later. */
    3.38 -        break;        
    3.39 -    default:
    3.40 -        goto parse_error;
    3.41 -    }
    3.42 -
    3.43 -    ctrl_if_send_response(msg);
    3.44 -    return;
    3.45 -
    3.46 - parse_error:
    3.47 -    DPRINTK("Parse error while reading message subtype %d, len %d\n",
    3.48 -            msg->subtype, msg->length);
    3.49 -    msg->length = 0;
    3.50 -    ctrl_if_send_response(msg);
    3.51 -}
    3.52 -
    3.53 -void netif_ctrlif_init(void)
    3.54 -{
    3.55 -    ctrl_msg_t                       cmsg;
    3.56 -    netif_be_driver_status_changed_t st;
    3.57 -
    3.58 -    (void)ctrl_if_register_receiver(CMSG_NETIF_BE, netif_ctrlif_rx,
    3.59 -                                    CALLBACK_IN_BLOCKING_CONTEXT);
    3.60 -
    3.61 -    /* Send a driver-UP notification to the domain controller. */
    3.62 -    cmsg.type      = CMSG_NETIF_BE;
    3.63 -    cmsg.subtype   = CMSG_NETIF_BE_DRIVER_STATUS_CHANGED;
    3.64 -    cmsg.length    = sizeof(netif_be_driver_status_changed_t);
    3.65 -    st.status      = NETIF_DRIVER_STATUS_UP;
    3.66 -    memcpy(cmsg.msg, &st, sizeof(st));
    3.67 -    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
    3.68 -}
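
The dispatch convention above is: validate the payload length for each subtype, cast the message body, and let the handler either complete synchronously or (as netif_disconnect may) defer the response. A stripped-down, runnable userspace sketch of the same pattern, with stub types standing in for the real ctrl_if structures:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    typedef struct { int subtype; size_t length; char msg[64]; } ctrl_msg_t;
    typedef struct { unsigned int domid, netif_handle; } netif_be_destroy_t;

    enum { CMSG_NETIF_BE_DESTROY = 1 };

    static void netif_destroy(netif_be_destroy_t *destroy)
    {
        printf("destroying netif (%u,%u)\n",
               destroy->domid, destroy->netif_handle);
    }

    static void netif_ctrlif_rx(ctrl_msg_t *msg)
    {
        switch ( msg->subtype )
        {
        case CMSG_NETIF_BE_DESTROY:
            if ( msg->length != sizeof(netif_be_destroy_t) )
                goto parse_error;      /* reject mis-sized payloads */
            netif_destroy((netif_be_destroy_t *)&msg->msg[0]);
            break;
        default:
            goto parse_error;
        }
        printf("response sent\n");     /* ctrl_if_send_response(msg) */
        return;

     parse_error:
        msg->length = 0;               /* zero-length error response */
        printf("error response sent\n");
    }

    int main(void)
    {
        ctrl_msg_t msg = { CMSG_NETIF_BE_DESTROY, sizeof(netif_be_destroy_t) };
        netif_be_destroy_t d = { 3, 0 };
        memcpy(msg.msg, &d, sizeof(d));
        netif_ctrlif_rx(&msg);
        return 0;
    }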
     4.1 --- a/linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/interface.c	Fri Jul 30 19:57:28 2004 +0000
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,282 +0,0 @@
     4.4 -/******************************************************************************
     4.5 - * arch/xen/drivers/netif/backend/interface.c
     4.6 - * 
     4.7 - * Network-device interface management.
     4.8 - * 
     4.9 - * Copyright (c) 2004, Keir Fraser
    4.10 - */
    4.11 -
    4.12 -#include "common.h"
    4.13 -#include <linux/rtnetlink.h>
    4.14 -
    4.15 -#define NETIF_HASHSZ 1024
    4.16 -#define NETIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(NETIF_HASHSZ-1))
    4.17 -
    4.18 -static netif_t *netif_hash[NETIF_HASHSZ];
    4.19 -
    4.20 -netif_t *netif_find_by_handle(domid_t domid, unsigned int handle)
    4.21 -{
    4.22 -    netif_t *netif = netif_hash[NETIF_HASH(domid, handle)];
    4.23 -    while ( (netif != NULL) && 
    4.24 -            ((netif->domid != domid) || (netif->handle != handle)) )
    4.25 -        netif = netif->hash_next;
    4.26 -    return netif;
    4.27 -}
    4.28 -
    4.29 -void __netif_disconnect_complete(netif_t *netif)
    4.30 -{
    4.31 -    ctrl_msg_t            cmsg;
    4.32 -    netif_be_disconnect_t disc;
    4.33 -
    4.34 -    /*
    4.35 -     * These can't be done in __netif_disconnect() because at that point there
     4.36 -     * may be outstanding requests in flight whose asynchronous responses
    4.37 -     * must still be notified to the remote driver.
    4.38 -     */
    4.39 -    unbind_evtchn_from_irq(netif->evtchn);
    4.40 -    vfree(netif->tx); /* Frees netif->rx as well. */
    4.41 -    rtnl_lock();
    4.42 -    (void)dev_close(netif->dev);
    4.43 -    rtnl_unlock();
    4.44 -
    4.45 -    /* Construct the deferred response message. */
    4.46 -    cmsg.type         = CMSG_NETIF_BE;
    4.47 -    cmsg.subtype      = CMSG_NETIF_BE_DISCONNECT;
    4.48 -    cmsg.id           = netif->disconnect_rspid;
    4.49 -    cmsg.length       = sizeof(netif_be_disconnect_t);
    4.50 -    disc.domid        = netif->domid;
    4.51 -    disc.netif_handle = netif->handle;
    4.52 -    disc.status       = NETIF_BE_STATUS_OKAY;
    4.53 -    memcpy(cmsg.msg, &disc, sizeof(disc));
    4.54 -
    4.55 -    /*
    4.56 -     * Make sure message is constructed /before/ status change, because
    4.57 -     * after the status change the 'netif' structure could be deallocated at
    4.58 -     * any time. Also make sure we send the response /after/ status change,
    4.59 -     * as otherwise a subsequent CONNECT request could spuriously fail if
    4.60 -     * another CPU doesn't see the status change yet.
    4.61 -     */
    4.62 -    mb();
    4.63 -    if ( netif->status != DISCONNECTING )
    4.64 -        BUG();
    4.65 -    netif->status = DISCONNECTED;
    4.66 -    mb();
    4.67 -
    4.68 -    /* Send the successful response. */
    4.69 -    ctrl_if_send_response(&cmsg);
    4.70 -}
    4.71 -
    4.72 -void netif_create(netif_be_create_t *create)
    4.73 -{
    4.74 -    int                err = 0;
    4.75 -    domid_t            domid  = create->domid;
    4.76 -    unsigned int       handle = create->netif_handle;
    4.77 -    struct net_device *dev;
    4.78 -    netif_t          **pnetif, *netif;
    4.79 -    char               name[IFNAMSIZ] = {};
    4.80 -
    4.81 -    snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
    4.82 -    dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
    4.83 -    if ( dev == NULL )
    4.84 -    {
    4.85 -        DPRINTK("Could not create netif: out of memory\n");
    4.86 -        create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
    4.87 -        return;
    4.88 -    }
    4.89 -
    4.90 -    netif = dev->priv;
    4.91 -    memset(netif, 0, sizeof(*netif));
    4.92 -    netif->domid  = domid;
    4.93 -    netif->handle = handle;
    4.94 -    netif->status = DISCONNECTED;
    4.95 -    spin_lock_init(&netif->rx_lock);
    4.96 -    spin_lock_init(&netif->tx_lock);
    4.97 -    atomic_set(&netif->refcnt, 0);
    4.98 -    netif->dev = dev;
    4.99 -
   4.100 -    netif->credit_bytes = netif->remaining_credit = ~0UL;
   4.101 -    netif->credit_usec  = 0UL;
    4.102 -    /*init_ac_timer(&netif->credit_timeout);*/
   4.103 -
   4.104 -    pnetif = &netif_hash[NETIF_HASH(domid, handle)];
   4.105 -    while ( *pnetif != NULL )
   4.106 -    {
   4.107 -        if ( ((*pnetif)->domid == domid) && ((*pnetif)->handle == handle) )
   4.108 -        {
   4.109 -            DPRINTK("Could not create netif: already exists\n");
   4.110 -            create->status = NETIF_BE_STATUS_INTERFACE_EXISTS;
   4.111 -            kfree(dev);
   4.112 -            return;
   4.113 -        }
   4.114 -        pnetif = &(*pnetif)->hash_next;
   4.115 -    }
   4.116 -
   4.117 -    dev->hard_start_xmit = netif_be_start_xmit;
   4.118 -    dev->get_stats       = netif_be_get_stats;
   4.119 -    memcpy(dev->dev_addr, create->mac, ETH_ALEN);
   4.120 -
   4.121 -    /* Disable queuing. */
   4.122 -    dev->tx_queue_len = 0;
   4.123 -
   4.124 -    /* Force a different MAC from remote end. */
   4.125 -    dev->dev_addr[2] ^= 1;
   4.126 -
   4.127 -    if ( (err = register_netdev(dev)) != 0 )
   4.128 -    {
   4.129 -        DPRINTK("Could not register new net device %s: err=%d\n",
   4.130 -                dev->name, err);
   4.131 -        create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
   4.132 -        kfree(dev);
   4.133 -        return;
   4.134 -    }
   4.135 -
   4.136 -    netif->hash_next = *pnetif;
   4.137 -    *pnetif = netif;
   4.138 -
   4.139 -    DPRINTK("Successfully created netif\n");
   4.140 -    create->status = NETIF_BE_STATUS_OKAY;
   4.141 -}
   4.142 -
   4.143 -void netif_destroy(netif_be_destroy_t *destroy)
   4.144 -{
   4.145 -    domid_t       domid  = destroy->domid;
   4.146 -    unsigned int  handle = destroy->netif_handle;
   4.147 -    netif_t     **pnetif, *netif;
   4.148 -
   4.149 -    pnetif = &netif_hash[NETIF_HASH(domid, handle)];
   4.150 -    while ( (netif = *pnetif) != NULL )
   4.151 -    {
   4.152 -        if ( (netif->domid == domid) && (netif->handle == handle) )
   4.153 -        {
   4.154 -            if ( netif->status != DISCONNECTED )
   4.155 -                goto still_connected;
   4.156 -            goto destroy;
   4.157 -        }
   4.158 -        pnetif = &netif->hash_next;
   4.159 -    }
   4.160 -
   4.161 -    destroy->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
   4.162 -    return;
   4.163 -
   4.164 - still_connected:
   4.165 -    destroy->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
   4.166 -    return;
   4.167 -
   4.168 - destroy:
   4.169 -    *pnetif = netif->hash_next;
   4.170 -    unregister_netdev(netif->dev);
   4.171 -    kfree(netif->dev);
   4.172 -    destroy->status = NETIF_BE_STATUS_OKAY;
   4.173 -}
   4.174 -
   4.175 -void netif_connect(netif_be_connect_t *connect)
   4.176 -{
   4.177 -    domid_t       domid  = connect->domid;
   4.178 -    unsigned int  handle = connect->netif_handle;
   4.179 -    unsigned int  evtchn = connect->evtchn;
   4.180 -    unsigned long tx_shmem_frame = connect->tx_shmem_frame;
   4.181 -    unsigned long rx_shmem_frame = connect->rx_shmem_frame;
   4.182 -    struct vm_struct *vma;
   4.183 -    pgprot_t      prot;
   4.184 -    int           error;
   4.185 -    netif_t      *netif;
   4.186 -    struct net_device *eth0_dev;
   4.187 -
   4.188 -    netif = netif_find_by_handle(domid, handle);
   4.189 -    if ( unlikely(netif == NULL) )
   4.190 -    {
   4.191 -        DPRINTK("netif_connect attempted for non-existent netif (%u,%u)\n", 
   4.192 -                connect->domid, connect->netif_handle); 
   4.193 -        connect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
   4.194 -        return;
   4.195 -    }
   4.196 -
   4.197 -    if ( netif->status != DISCONNECTED )
   4.198 -    {
   4.199 -        connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
   4.200 -        return;
   4.201 -    }
   4.202 -
   4.203 -    if ( (vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP)) == NULL )
   4.204 -    {
   4.205 -        connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
   4.206 -        return;
   4.207 -    }
   4.208 -
   4.209 -    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
   4.210 -    error  = direct_remap_area_pages(&init_mm, 
   4.211 -                                     VMALLOC_VMADDR(vma->addr),
   4.212 -                                     tx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
   4.213 -                                     prot, domid);
   4.214 -    error |= direct_remap_area_pages(&init_mm, 
   4.215 -                                     VMALLOC_VMADDR(vma->addr) + PAGE_SIZE,
   4.216 -                                     rx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
   4.217 -                                     prot, domid);
   4.218 -    if ( error != 0 )
   4.219 -    {
   4.220 -        if ( error == -ENOMEM )
   4.221 -            connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
   4.222 -        else if ( error == -EFAULT )
   4.223 -            connect->status = NETIF_BE_STATUS_MAPPING_ERROR;
   4.224 -        else
   4.225 -            connect->status = NETIF_BE_STATUS_ERROR;
   4.226 -        vfree(vma->addr);
   4.227 -        return;
   4.228 -    }
   4.229 -
   4.230 -    netif->evtchn         = evtchn;
   4.231 -    netif->irq            = bind_evtchn_to_irq(evtchn);
   4.232 -    netif->tx_shmem_frame = tx_shmem_frame;
   4.233 -    netif->rx_shmem_frame = rx_shmem_frame;
   4.234 -    netif->tx             = 
   4.235 -        (netif_tx_interface_t *)vma->addr;
   4.236 -    netif->rx             = 
   4.237 -        (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
   4.238 -    netif->status         = CONNECTED;
   4.239 -    netif_get(netif);
   4.240 -
   4.241 -    rtnl_lock();
   4.242 -    (void)dev_open(netif->dev);
   4.243 -    rtnl_unlock();
   4.244 -
   4.245 -    (void)request_irq(netif->irq, netif_be_int, 0, netif->dev->name, netif);
   4.246 -    netif_start_queue(netif->dev);
   4.247 -
   4.248 -    connect->status = NETIF_BE_STATUS_OKAY;
   4.249 -}
   4.250 -
   4.251 -int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id)
   4.252 -{
   4.253 -    domid_t       domid  = disconnect->domid;
   4.254 -    unsigned int  handle = disconnect->netif_handle;
   4.255 -    netif_t      *netif;
   4.256 -
   4.257 -    netif = netif_find_by_handle(domid, handle);
   4.258 -    if ( unlikely(netif == NULL) )
   4.259 -    {
   4.260 -        DPRINTK("netif_disconnect attempted for non-existent netif"
   4.261 -                " (%u,%u)\n", disconnect->domid, disconnect->netif_handle); 
   4.262 -        disconnect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
   4.263 -        return 1; /* Caller will send response error message. */
   4.264 -    }
   4.265 -
   4.266 -    if ( netif->status == CONNECTED )
   4.267 -    {
   4.268 -        netif->status = DISCONNECTING;
   4.269 -        netif->disconnect_rspid = rsp_id;
   4.270 -        wmb(); /* Let other CPUs see the status change. */
   4.271 -        netif_stop_queue(netif->dev);
   4.272 -        free_irq(netif->irq, netif);
   4.273 -        netif_deschedule(netif);
   4.274 -        netif_put(netif);
   4.275 -        return 0; /* Caller should not send response message. */
   4.276 -    }
   4.277 -
   4.278 -    disconnect->status = NETIF_BE_STATUS_OKAY;
   4.279 -    return 1;
   4.280 -}
   4.281 -
   4.282 -void netif_interface_init(void)
   4.283 -{
   4.284 -    memset(netif_hash, 0, sizeof(netif_hash));
   4.285 -}
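
netif_connect() maps the two shared ring pages from the other domain into a single 2*PAGE_SIZE virtual area: the tx ring at offset 0 and the rx ring at offset PAGE_SIZE. A trivial userspace sketch of that layout, with malloc() standing in for get_vm_area() plus direct_remap_area_pages(), and assuming 4KB pages:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096              /* assumption: 4KB pages, as on x86 */

    int main(void)
    {
        char *window = malloc(2 * PAGE_SIZE);  /* stands in for vma->addr */
        void *tx = window;                     /* netif->tx: first page   */
        void *rx = window + PAGE_SIZE;         /* netif->rx: second page  */

        printf("tx ring at %p, rx ring at %p\n", tx, rx);
        free(window);
        return 0;
    }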
     5.1 --- a/linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/main.c	Fri Jul 30 19:57:28 2004 +0000
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,775 +0,0 @@
     5.4 -/******************************************************************************
     5.5 - * arch/xen/drivers/netif/backend/main.c
     5.6 - * 
      5.7 - * Back-end of the driver for virtual network devices. This portion of the
      5.8 - * driver exports a 'unified' network-device interface that can be accessed
     5.9 - * by any operating system that implements a compatible front end. A 
    5.10 - * reference front-end implementation can be found in:
    5.11 - *  arch/xen/drivers/netif/frontend
    5.12 - * 
    5.13 - * Copyright (c) 2002-2004, K A Fraser
    5.14 - */
    5.15 -
    5.16 -#include "common.h"
    5.17 -
    5.18 -static void netif_page_release(struct page *page);
    5.19 -static void make_tx_response(netif_t *netif, 
    5.20 -                             u16      id,
    5.21 -                             s8       st);
    5.22 -static int  make_rx_response(netif_t *netif, 
    5.23 -                             u16      id, 
    5.24 -                             s8       st,
    5.25 -                             memory_t addr,
    5.26 -                             u16      size);
    5.27 -
    5.28 -static void net_tx_action(unsigned long unused);
    5.29 -static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
    5.30 -
    5.31 -static void net_rx_action(unsigned long unused);
    5.32 -static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
    5.33 -
    5.34 -static struct sk_buff_head rx_queue;
    5.35 -static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
    5.36 -static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
    5.37 -static unsigned char rx_notify[NR_EVENT_CHANNELS];
    5.38 -
    5.39 -/* Don't currently gate addition of an interface to the tx scheduling list. */
    5.40 -#define tx_work_exists(_if) (1)
    5.41 -
    5.42 -#define MAX_PENDING_REQS 256
    5.43 -static unsigned long mmap_vstart;
    5.44 -#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
    5.45 -
    5.46 -#define PKT_PROT_LEN (ETH_HLEN + 20)
    5.47 -
    5.48 -static struct {
    5.49 -    netif_tx_request_t req;
    5.50 -    netif_t *netif;
    5.51 -} pending_tx_info[MAX_PENDING_REQS];
    5.52 -static u16 pending_ring[MAX_PENDING_REQS];
    5.53 -typedef unsigned int PEND_RING_IDX;
    5.54 -#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
    5.55 -static PEND_RING_IDX pending_prod, pending_cons;
    5.56 -#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
    5.57 -
    5.58 -/* Freed TX SKBs get batched on this ring before return to pending_ring. */
    5.59 -static u16 dealloc_ring[MAX_PENDING_REQS];
    5.60 -static spinlock_t dealloc_lock = SPIN_LOCK_UNLOCKED;
    5.61 -static PEND_RING_IDX dealloc_prod, dealloc_cons;
    5.62 -
    5.63 -static struct sk_buff_head tx_queue;
    5.64 -static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
    5.65 -
    5.66 -static struct list_head net_schedule_list;
    5.67 -static spinlock_t net_schedule_list_lock;
    5.68 -
    5.69 -#define MAX_MFN_ALLOC 64
    5.70 -static unsigned long mfn_list[MAX_MFN_ALLOC];
    5.71 -static unsigned int alloc_index = 0;
    5.72 -static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
    5.73 -
    5.74 -static void __refresh_mfn_list(void)
    5.75 -{
    5.76 -    int ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
    5.77 -                                    mfn_list, MAX_MFN_ALLOC);
    5.78 -    if ( unlikely(ret != MAX_MFN_ALLOC) )
    5.79 -        BUG();
    5.80 -    alloc_index = MAX_MFN_ALLOC;
    5.81 -}
    5.82 -
    5.83 -static unsigned long get_new_mfn(void)
    5.84 -{
    5.85 -    unsigned long mfn, flags;
    5.86 -    spin_lock_irqsave(&mfn_lock, flags);
    5.87 -    if ( alloc_index == 0 )
    5.88 -        __refresh_mfn_list();
    5.89 -    mfn = mfn_list[--alloc_index];
    5.90 -    spin_unlock_irqrestore(&mfn_lock, flags);
    5.91 -    return mfn;
    5.92 -}
    5.93 -
    5.94 -static void dealloc_mfn(unsigned long mfn)
    5.95 -{
    5.96 -    unsigned long flags;
    5.97 -    spin_lock_irqsave(&mfn_lock, flags);
    5.98 -    if ( alloc_index != MAX_MFN_ALLOC )
    5.99 -        mfn_list[alloc_index++] = mfn;
   5.100 -    else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, &mfn, 1) != 1 )
   5.101 -        BUG();
   5.102 -    spin_unlock_irqrestore(&mfn_lock, flags);
   5.103 -}
   5.104 -
   5.105 -static inline void maybe_schedule_tx_action(void)
   5.106 -{
   5.107 -    smp_mb();
   5.108 -    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
   5.109 -         !list_empty(&net_schedule_list) )
   5.110 -        tasklet_schedule(&net_tx_tasklet);
   5.111 -}
   5.112 -
   5.113 -/*
   5.114 - * This is the primary RECEIVE function for a network interface.
   5.115 - * Note that, from the p.o.v. of /this/ OS it looks like a transmit.
   5.116 - */
   5.117 -int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
   5.118 -{
   5.119 -    netif_t *netif = (netif_t *)dev->priv;
   5.120 -
   5.121 -    ASSERT(skb->dev == dev);
   5.122 -
   5.123 -    /* Drop the packet if the target domain has no receive buffers. */
   5.124 -    if ( (netif->rx_req_cons == netif->rx->req_prod) ||
   5.125 -         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
   5.126 -        goto drop;
   5.127 -
   5.128 -    /*
   5.129 -     * We do not copy the packet unless:
   5.130 -     *  1. The data is shared; or
   5.131 -     *  2. It spans a page boundary; or
   5.132 -     *  3. We cannot be sure the whole data page is allocated.
   5.133 -     * The copying method is taken from skb_copy().
    5.134 -     * NB. We also cannot cope with fragmented packets, but we won't get
    5.135 -     *     any because we do not advertise the NETIF_F_SG feature.
   5.136 -     */
   5.137 -    if ( skb_shared(skb) || skb_cloned(skb) || 
   5.138 -         (((unsigned long)skb->end ^ (unsigned long)skb->head) & PAGE_MASK) ||
   5.139 -         ((skb->end - skb->head) < (PAGE_SIZE/2)) )
   5.140 -    {
   5.141 -        struct sk_buff *nskb = alloc_skb(PAGE_SIZE-1024, GFP_ATOMIC);
   5.142 -        int hlen = skb->data - skb->head;
   5.143 -        if ( unlikely(nskb == NULL) )
   5.144 -            goto drop;
   5.145 -        skb_reserve(nskb, hlen);
   5.146 -        __skb_put(nskb, skb->len);
   5.147 -        (void)skb_copy_bits(skb, -hlen, nskb->head, hlen + skb->len);
   5.148 -        nskb->dev = skb->dev;
   5.149 -        dev_kfree_skb(skb);
   5.150 -        skb = nskb;
   5.151 -    }
   5.152 -
   5.153 -    netif->rx_req_cons++;
   5.154 -
   5.155 -    skb_queue_tail(&rx_queue, skb);
   5.156 -    tasklet_schedule(&net_rx_tasklet);
   5.157 -
   5.158 -    return 0;
   5.159 -
   5.160 - drop:
   5.161 -    netif->stats.rx_dropped++;
   5.162 -    dev_kfree_skb(skb);
   5.163 -    return 0;
   5.164 -}
   5.165 -
   5.166 -#if 0
   5.167 -static void xen_network_done_notify(void)
   5.168 -{
   5.169 -    static struct net_device *eth0_dev = NULL;
   5.170 -    if ( unlikely(eth0_dev == NULL) )
   5.171 -        eth0_dev = __dev_get_by_name("eth0");
   5.172 -    netif_rx_schedule(eth0_dev);
   5.173 -}
   5.174 -/* 
   5.175 - * Add following to poll() function in NAPI driver (Tigon3 is example):
   5.176 - *  if ( xen_network_done() )
   5.177 - *      tg3_enable_ints(tp); 
   5.178 - */
   5.179 -int xen_network_done(void)
   5.180 -{
   5.181 -    return skb_queue_empty(&rx_queue);
   5.182 -}
   5.183 -#endif
   5.184 -
   5.185 -static void net_rx_action(unsigned long unused)
   5.186 -{
   5.187 -    netif_t *netif;
   5.188 -    s8 status;
   5.189 -    u16 size, id, evtchn;
   5.190 -    mmu_update_t *mmu;
   5.191 -    multicall_entry_t *mcl;
   5.192 -    unsigned long vdata, mdata, new_mfn;
   5.193 -    struct sk_buff_head rxq;
   5.194 -    struct sk_buff *skb;
   5.195 -    u16 notify_list[NETIF_RX_RING_SIZE];
   5.196 -    int notify_nr = 0;
   5.197 -
   5.198 -    skb_queue_head_init(&rxq);
   5.199 -
   5.200 -    mcl = rx_mcl;
   5.201 -    mmu = rx_mmu;
   5.202 -    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
   5.203 -    {
   5.204 -        netif   = (netif_t *)skb->dev->priv;
   5.205 -        vdata   = (unsigned long)skb->data;
   5.206 -        mdata   = virt_to_machine(vdata);
   5.207 -        new_mfn = get_new_mfn();
   5.208 -        
   5.209 -        mmu[0].ptr  = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   5.210 -        mmu[0].val  = __pa(vdata) >> PAGE_SHIFT;        
   5.211 -        mmu[1].val  = (unsigned long)(netif->domid<<16) & ~0xFFFFUL;
   5.212 -        mmu[1].ptr  = (unsigned long)(netif->domid<< 0) & ~0xFFFFUL;
   5.213 -        mmu[1].ptr |= MMU_EXTENDED_COMMAND;
   5.214 -        mmu[1].val |= MMUEXT_SET_SUBJECTDOM;
   5.215 -        mmu[2].ptr  = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
   5.216 -        mmu[2].val  = MMUEXT_REASSIGN_PAGE;
   5.217 -
   5.218 -        mcl[0].op = __HYPERVISOR_update_va_mapping;
   5.219 -        mcl[0].args[0] = vdata >> PAGE_SHIFT;
   5.220 -        mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
   5.221 -        mcl[0].args[2] = 0;
   5.222 -        mcl[1].op = __HYPERVISOR_mmu_update;
   5.223 -        mcl[1].args[0] = (unsigned long)mmu;
   5.224 -        mcl[1].args[1] = 3;
   5.225 -        mcl[1].args[2] = 0;
   5.226 -
   5.227 -        mcl += 2;
   5.228 -        mmu += 3;
   5.229 -
   5.230 -        __skb_queue_tail(&rxq, skb);
   5.231 -
   5.232 -        /* Filled the batch queue? */
   5.233 -        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
   5.234 -            break;
   5.235 -    }
   5.236 -
   5.237 -    if ( mcl == rx_mcl )
   5.238 -        return;
   5.239 -
   5.240 -    mcl[-2].args[2] = UVMF_FLUSH_TLB;
   5.241 -    if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
   5.242 -        BUG();
   5.243 -
   5.244 -    mcl = rx_mcl;
   5.245 -    mmu = rx_mmu;
   5.246 -    while ( (skb = __skb_dequeue(&rxq)) != NULL )
   5.247 -    {
   5.248 -        netif   = (netif_t *)skb->dev->priv;
   5.249 -        size    = skb->tail - skb->data;
   5.250 -
   5.251 -        /* Rederive the machine addresses. */
   5.252 -        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
   5.253 -        mdata   = ((mmu[2].ptr & PAGE_MASK) |
   5.254 -                   ((unsigned long)skb->data & ~PAGE_MASK));
   5.255 -        
   5.256 -        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
   5.257 -        
   5.258 -        atomic_set(&(skb_shinfo(skb)->dataref), 1);
   5.259 -        skb_shinfo(skb)->nr_frags = 0;
   5.260 -        skb_shinfo(skb)->frag_list = NULL;
   5.261 -
   5.262 -        netif->stats.rx_bytes += size;
   5.263 -        netif->stats.rx_packets++;
   5.264 -
   5.265 -        /* The update_va_mapping() must not fail. */
   5.266 -        if ( unlikely(mcl[0].args[5] != 0) )
   5.267 -            BUG();
   5.268 -
   5.269 -        /* Check the reassignment error code. */
   5.270 -        status = NETIF_RSP_OKAY;
   5.271 -        if ( unlikely(mcl[1].args[5] != 0) )
   5.272 -        {
   5.273 -            DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
   5.274 -            dealloc_mfn(mdata >> PAGE_SHIFT);
   5.275 -            status = NETIF_RSP_ERROR;
   5.276 -        }
   5.277 -
   5.278 -        evtchn = netif->evtchn;
   5.279 -        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
   5.280 -        if ( make_rx_response(netif, id, status, mdata, size) &&
   5.281 -             (rx_notify[evtchn] == 0) )
   5.282 -        {
   5.283 -            rx_notify[evtchn] = 1;
   5.284 -            notify_list[notify_nr++] = evtchn;
   5.285 -        }
   5.286 -
   5.287 -        dev_kfree_skb(skb);
   5.288 -
   5.289 -        mcl += 2;
   5.290 -        mmu += 3;
   5.291 -    }
   5.292 -
   5.293 -    while ( notify_nr != 0 )
   5.294 -    {
   5.295 -        evtchn = notify_list[--notify_nr];
   5.296 -        rx_notify[evtchn] = 0;
   5.297 -        notify_via_evtchn(evtchn);
   5.298 -    }
   5.299 -
   5.300 -    /* More work to do? */
   5.301 -    if ( !skb_queue_empty(&rx_queue) )
   5.302 -        tasklet_schedule(&net_rx_tasklet);
   5.303 -#if 0
   5.304 -    else
   5.305 -        xen_network_done_notify();
   5.306 -#endif
   5.307 -}
   5.308 -
   5.309 -struct net_device_stats *netif_be_get_stats(struct net_device *dev)
   5.310 -{
   5.311 -    netif_t *netif = dev->priv;
   5.312 -    return &netif->stats;
   5.313 -}
   5.314 -
   5.315 -static int __on_net_schedule_list(netif_t *netif)
   5.316 -{
   5.317 -    return netif->list.next != NULL;
   5.318 -}
   5.319 -
   5.320 -static void remove_from_net_schedule_list(netif_t *netif)
   5.321 -{
   5.322 -    spin_lock_irq(&net_schedule_list_lock);
   5.323 -    if ( likely(__on_net_schedule_list(netif)) )
   5.324 -    {
   5.325 -        list_del(&netif->list);
   5.326 -        netif->list.next = NULL;
   5.327 -        netif_put(netif);
   5.328 -    }
   5.329 -    spin_unlock_irq(&net_schedule_list_lock);
   5.330 -}
   5.331 -
   5.332 -static void add_to_net_schedule_list_tail(netif_t *netif)
   5.333 -{
   5.334 -    if ( __on_net_schedule_list(netif) )
   5.335 -        return;
   5.336 -
   5.337 -    spin_lock_irq(&net_schedule_list_lock);
   5.338 -    if ( !__on_net_schedule_list(netif) && (netif->status == CONNECTED) )
   5.339 -    {
   5.340 -        list_add_tail(&netif->list, &net_schedule_list);
   5.341 -        netif_get(netif);
   5.342 -    }
   5.343 -    spin_unlock_irq(&net_schedule_list_lock);
   5.344 -}
   5.345 -
   5.346 -static inline void netif_schedule_work(netif_t *netif)
   5.347 -{
   5.348 -    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
   5.349 -         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
   5.350 -    {
   5.351 -        add_to_net_schedule_list_tail(netif);
   5.352 -        maybe_schedule_tx_action();
   5.353 -    }
   5.354 -}
   5.355 -
   5.356 -void netif_deschedule(netif_t *netif)
   5.357 -{
   5.358 -    remove_from_net_schedule_list(netif);
   5.359 -}
   5.360 -
   5.361 -#if 0
   5.362 -static void tx_credit_callback(unsigned long data)
   5.363 -{
   5.364 -    netif_t *netif = (netif_t *)data;
   5.365 -    netif->remaining_credit = netif->credit_bytes;
   5.366 -    netif_schedule_work(netif);
   5.367 -}
   5.368 -#endif
   5.369 -
   5.370 -static void net_tx_action(unsigned long unused)
   5.371 -{
   5.372 -    struct list_head *ent;
   5.373 -    struct sk_buff *skb;
   5.374 -    netif_t *netif;
   5.375 -    netif_tx_request_t txreq;
   5.376 -    u16 pending_idx;
   5.377 -    NETIF_RING_IDX i;
   5.378 -    struct page *page;
   5.379 -    multicall_entry_t *mcl;
   5.380 -    PEND_RING_IDX dc, dp;
   5.381 -
   5.382 -    if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
   5.383 -        goto skip_dealloc;
   5.384 -
   5.385 -    mcl = tx_mcl;
   5.386 -    while ( dc != dp )
   5.387 -    {
   5.388 -        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
   5.389 -        mcl[0].op = __HYPERVISOR_update_va_mapping;
   5.390 -        mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
   5.391 -        mcl[0].args[1] = 0;
   5.392 -        mcl[0].args[2] = 0;
   5.393 -        mcl++;        
   5.394 -    }
   5.395 -
   5.396 -    mcl[-1].args[2] = UVMF_FLUSH_TLB;
   5.397 -    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
   5.398 -        BUG();
   5.399 -
   5.400 -    mcl = tx_mcl;
   5.401 -    while ( dealloc_cons != dp )
   5.402 -    {
   5.403 -        /* The update_va_mapping() must not fail. */
   5.404 -        if ( unlikely(mcl[0].args[5] != 0) )
   5.405 -            BUG();
   5.406 -
   5.407 -        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
   5.408 -
   5.409 -        netif = pending_tx_info[pending_idx].netif;
   5.410 -
   5.411 -        spin_lock(&netif->tx_lock);
   5.412 -        make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
   5.413 -                         NETIF_RSP_OKAY);
   5.414 -        spin_unlock(&netif->tx_lock);
   5.415 -        
   5.416 -        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   5.417 -        
   5.418 -        /*
   5.419 -         * Scheduling checks must happen after the above response is posted.
   5.420 -         * This avoids a possible race with a guest OS on another CPU.
   5.421 -         */
   5.422 -        mb();
   5.423 -        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
   5.424 -             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
   5.425 -            add_to_net_schedule_list_tail(netif);
   5.426 -        
   5.427 -        netif_put(netif);
   5.428 -
   5.429 -        mcl++;
   5.430 -    }
   5.431 -
   5.432 - skip_dealloc:
   5.433 -    mcl = tx_mcl;
   5.434 -    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
   5.435 -            !list_empty(&net_schedule_list) )
   5.436 -    {
   5.437 -        /* Get a netif from the list with work to do. */
   5.438 -        ent = net_schedule_list.next;
   5.439 -        netif = list_entry(ent, netif_t, list);
   5.440 -        netif_get(netif);
   5.441 -        remove_from_net_schedule_list(netif);
   5.442 -
   5.443 -        /* Work to do? */
   5.444 -        i = netif->tx_req_cons;
   5.445 -        if ( (i == netif->tx->req_prod) ||
   5.446 -             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
   5.447 -        {
   5.448 -            netif_put(netif);
   5.449 -            continue;
   5.450 -        }
   5.451 -        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
   5.452 -               sizeof(txreq));
   5.453 -        netif->tx_req_cons++;
   5.454 -
   5.455 -#if 0
   5.456 -        /* Credit-based scheduling. */
    5.457 -        if ( txreq.size > netif->remaining_credit )
   5.458 -        {
   5.459 -            s_time_t now = NOW(), next_credit = 
   5.460 -                netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
   5.461 -            if ( next_credit <= now )
   5.462 -            {
   5.463 -                netif->credit_timeout.expires = now;
   5.464 -                netif->remaining_credit = netif->credit_bytes;
   5.465 -            }
   5.466 -            else
   5.467 -            {
   5.468 -                netif->remaining_credit = 0;
   5.469 -                netif->credit_timeout.expires  = next_credit;
   5.470 -                netif->credit_timeout.data     = (unsigned long)netif;
   5.471 -                netif->credit_timeout.function = tx_credit_callback;
   5.472 -                netif->credit_timeout.cpu      = smp_processor_id();
   5.473 -                add_ac_timer(&netif->credit_timeout);
   5.474 -                break;
   5.475 -            }
   5.476 -        }
    5.477 -        netif->remaining_credit -= txreq.size;
   5.478 -#endif
   5.479 -
   5.480 -        netif_schedule_work(netif);
   5.481 -
   5.482 -        if ( unlikely(txreq.size <= PKT_PROT_LEN) || 
   5.483 -             unlikely(txreq.size > ETH_FRAME_LEN) )
   5.484 -        {
   5.485 -            DPRINTK("Bad packet size: %d\n", txreq.size);
   5.486 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   5.487 -            netif_put(netif);
   5.488 -            continue; 
   5.489 -        }
   5.490 -
    5.491 -        /* The payload must not cross a page boundary, as it cannot fragment. */
   5.492 -        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) ) 
   5.493 -        {
   5.494 -            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
   5.495 -                    txreq.addr, txreq.size, 
   5.496 -                    (txreq.addr &~PAGE_MASK) + txreq.size);
   5.497 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   5.498 -            netif_put(netif);
   5.499 -            continue;
   5.500 -        }
   5.501 -
   5.502 -        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   5.503 -
   5.504 -        if ( unlikely((skb = alloc_skb(PKT_PROT_LEN+16, GFP_ATOMIC)) == NULL) )
   5.505 -        {
    5.506 -            DPRINTK("Can't allocate a skb in net_tx_action.\n");
   5.507 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   5.508 -            netif_put(netif);
   5.509 -            break;
   5.510 -        }
   5.511 -
   5.512 -        /* Packets passed to netif_rx() must have some headroom. */
   5.513 -        skb_reserve(skb, 16);
   5.514 -
   5.515 -        mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
   5.516 -        mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
   5.517 -        mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
   5.518 -        mcl[0].args[2] = 0;
   5.519 -        mcl[0].args[3] = netif->domid;
   5.520 -        mcl++;
   5.521 -
   5.522 -        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
   5.523 -        pending_tx_info[pending_idx].netif = netif;
   5.524 -        *((u16 *)skb->data) = pending_idx;
   5.525 -
   5.526 -        __skb_queue_tail(&tx_queue, skb);
   5.527 -
   5.528 -        pending_cons++;
   5.529 -
   5.530 -        /* Filled the batch queue? */
   5.531 -        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
   5.532 -            break;
   5.533 -    }
   5.534 -
   5.535 -    if ( mcl == tx_mcl )
   5.536 -        return;
   5.537 -
   5.538 -    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
   5.539 -        BUG();
   5.540 -
   5.541 -    mcl = tx_mcl;
   5.542 -    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
   5.543 -    {
   5.544 -        pending_idx = *((u16 *)skb->data);
   5.545 -        netif       = pending_tx_info[pending_idx].netif;
   5.546 -        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
   5.547 -
   5.548 -        /* Check the remap error code. */
   5.549 -        if ( unlikely(mcl[0].args[5] != 0) )
   5.550 -        {
   5.551 -            DPRINTK("Bad page frame\n");
   5.552 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   5.553 -            netif_put(netif);
   5.554 -            kfree_skb(skb);
   5.555 -            mcl++;
   5.556 -            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   5.557 -            continue;
   5.558 -        }
   5.559 -
   5.560 -        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
   5.561 -            txreq.addr >> PAGE_SHIFT;
   5.562 -
   5.563 -        __skb_put(skb, PKT_PROT_LEN);
   5.564 -        memcpy(skb->data, 
   5.565 -               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
   5.566 -               PKT_PROT_LEN);
   5.567 -
   5.568 -        page = virt_to_page(MMAP_VADDR(pending_idx));
   5.569 -
   5.570 -        /* Append the packet payload as a fragment. */
   5.571 -        skb_shinfo(skb)->frags[0].page        = page;
   5.572 -        skb_shinfo(skb)->frags[0].size        = txreq.size - PKT_PROT_LEN;
   5.573 -        skb_shinfo(skb)->frags[0].page_offset = 
   5.574 -            (txreq.addr + PKT_PROT_LEN) & ~PAGE_MASK;
   5.575 -        skb_shinfo(skb)->nr_frags = 1;
   5.576 -        skb->data_len  = txreq.size - PKT_PROT_LEN;
   5.577 -        skb->len      += skb->data_len;
   5.578 -
   5.579 -        skb->dev      = netif->dev;
   5.580 -        skb->protocol = eth_type_trans(skb, skb->dev);
   5.581 -
   5.582 -        /*
   5.583 -         * Destructor information. We hideously abuse the 'mapping' pointer,
   5.584 -         * which isn't otherwise used by us. The page deallocator is modified
   5.585 -         * to interpret a non-NULL value as a destructor function to be called.
   5.586 -         * This works okay because in all other cases the pointer must be NULL
   5.587 -         * when the page is freed (normally Linux will explicitly bug out if
    5.588 -         * it sees otherwise).
   5.589 -         */
   5.590 -        page->mapping = (struct address_space *)netif_page_release;
   5.591 -        atomic_set(&page->count, 1);
   5.592 -
   5.593 -        netif->stats.tx_bytes += txreq.size;
   5.594 -        netif->stats.tx_packets++;
   5.595 -
   5.596 -        netif_rx(skb);
   5.597 -        netif->dev->last_rx = jiffies;
   5.598 -
   5.599 -        mcl++;
   5.600 -    }
   5.601 -}
   5.602 -
   5.603 -static void netif_page_release(struct page *page)
   5.604 -{
   5.605 -    unsigned long flags;
   5.606 -    u16 pending_idx = page - virt_to_page(mmap_vstart);
   5.607 -
   5.608 -    /* Stop the abuse. */
   5.609 -    page->mapping = NULL;
   5.610 -
   5.611 -    spin_lock_irqsave(&dealloc_lock, flags);
   5.612 -    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
   5.613 -    spin_unlock_irqrestore(&dealloc_lock, flags);
   5.614 -
   5.615 -    tasklet_schedule(&net_tx_tasklet);
   5.616 -}
   5.617 -
   5.618 -#if 0
   5.619 -long flush_bufs_for_netif(netif_t *netif)
   5.620 -{
   5.621 -    NETIF_RING_IDX i;
   5.622 -
   5.623 -    /* Return any outstanding receive buffers to the guest OS. */
   5.624 -    spin_lock(&netif->rx_lock);
   5.625 -    for ( i = netif->rx_req_cons; 
   5.626 -          (i != netif->rx->req_prod) &&
   5.627 -              ((i-netif->rx_resp_prod) != NETIF_RX_RING_SIZE);
   5.628 -          i++ )
   5.629 -    {
   5.630 -        make_rx_response(netif,
   5.631 -                         netif->rx->ring[MASK_NETIF_RX_IDX(i)].req.id,
   5.632 -                         NETIF_RSP_DROPPED, 0, 0);
   5.633 -    }
   5.634 -    netif->rx_req_cons = i;
   5.635 -    spin_unlock(&netif->rx_lock);
   5.636 -
   5.637 -    /*
   5.638 -     * Flush pending transmit buffers. The guest may still have to wait for
   5.639 -     * buffers that are queued at a physical NIC.
   5.640 -     */
   5.641 -    spin_lock(&netif->tx_lock);
   5.642 -    for ( i = netif->tx_req_cons; 
   5.643 -          (i != netif->tx->req_prod) &&
   5.644 -              ((i-netif->tx_resp_prod) != NETIF_TX_RING_SIZE);
   5.645 -          i++ )
   5.646 -    {
   5.647 -        make_tx_response(netif,
   5.648 -                         netif->tx->ring[MASK_NETIF_TX_IDX(i)].req.id,
   5.649 -                         NETIF_RSP_DROPPED);
   5.650 -    }
   5.651 -    netif->tx_req_cons = i;
   5.652 -    spin_unlock(&netif->tx_lock);
   5.653 -
   5.654 -    return 0;
   5.655 -}
   5.656 -#endif
   5.657 -
   5.658 -void netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
   5.659 -{
   5.660 -    netif_t *netif = dev_id;
   5.661 -    if ( tx_work_exists(netif) )
   5.662 -    {
   5.663 -        add_to_net_schedule_list_tail(netif);
   5.664 -        maybe_schedule_tx_action();
   5.665 -    }
   5.666 -}
   5.667 -
   5.668 -static void make_tx_response(netif_t *netif, 
   5.669 -                             u16      id,
   5.670 -                             s8       st)
   5.671 -{
   5.672 -    NETIF_RING_IDX i = netif->tx_resp_prod;
   5.673 -    netif_tx_response_t *resp;
   5.674 -
   5.675 -    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
   5.676 -    resp->id     = id;
   5.677 -    resp->status = st;
   5.678 -    wmb();
   5.679 -    netif->tx->resp_prod = netif->tx_resp_prod = ++i;
   5.680 -
   5.681 -    mb(); /* Update producer before checking event threshold. */
   5.682 -    if ( i == netif->tx->event )
   5.683 -        notify_via_evtchn(netif->evtchn);
   5.684 -}
   5.685 -
   5.686 -static int make_rx_response(netif_t *netif, 
   5.687 -                            u16      id, 
   5.688 -                            s8       st,
   5.689 -                            memory_t addr,
   5.690 -                            u16      size)
   5.691 -{
   5.692 -    NETIF_RING_IDX i = netif->rx_resp_prod;
   5.693 -    netif_rx_response_t *resp;
   5.694 -
   5.695 -    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
   5.696 -    resp->addr   = addr;
   5.697 -    resp->id     = id;
   5.698 -    resp->status = (s16)size;
   5.699 -    if ( st < 0 )
   5.700 -        resp->status = (s16)st;
   5.701 -    wmb();
   5.702 -    netif->rx->resp_prod = netif->rx_resp_prod = ++i;
   5.703 -
   5.704 -    mb(); /* Update producer before checking event threshold. */
   5.705 -    return (i == netif->rx->event);
   5.706 -}
   5.707 -
   5.708 -static void netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
   5.709 -{
   5.710 -    struct list_head *ent;
   5.711 -    netif_t *netif;
   5.712 -    int i = 0;
   5.713 -
   5.714 -    printk(KERN_ALERT "netif_schedule_list:\n");
   5.715 -    spin_lock_irq(&net_schedule_list_lock);
   5.716 -
   5.717 -    list_for_each ( ent, &net_schedule_list )
   5.718 -    {
   5.719 -        netif = list_entry(ent, netif_t, list);
   5.720 -        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
   5.721 -               i, netif->rx_req_cons, netif->rx_resp_prod);               
   5.722 -        printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
   5.723 -               netif->tx_req_cons, netif->tx_resp_prod);
   5.724 -        printk(KERN_ALERT "   shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
   5.725 -               netif->rx->req_prod, netif->rx->resp_prod);
   5.726 -        printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
   5.727 -               netif->rx->event, netif->tx->req_prod);
   5.728 -        printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
   5.729 -               netif->tx->resp_prod, netif->tx->event);
   5.730 -        i++;
   5.731 -    }
   5.732 -
   5.733 -    spin_unlock_irq(&net_schedule_list_lock);
   5.734 -    printk(KERN_ALERT " ** End of netif_schedule_list **\n");
   5.735 -}
   5.736 -
   5.737 -static int __init init_module(void)
   5.738 -{
   5.739 -    int i;
   5.740 -
   5.741 -    if ( !(start_info.flags & SIF_NET_BE_DOMAIN) &&
   5.742 -	 !(start_info.flags & SIF_INITDOMAIN) )
   5.743 -        return 0;
   5.744 -
   5.745 -    printk("Initialising Xen netif backend\n");
   5.746 -
   5.747 -    skb_queue_head_init(&rx_queue);
   5.748 -    skb_queue_head_init(&tx_queue);
   5.749 -
   5.750 -    netif_interface_init();
   5.751 -
   5.752 -    if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
   5.753 -        BUG();
   5.754 -
   5.755 -    pending_cons = 0;
   5.756 -    pending_prod = MAX_PENDING_REQS;
   5.757 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
   5.758 -        pending_ring[i] = i;
   5.759 -
   5.760 -    spin_lock_init(&net_schedule_list_lock);
   5.761 -    INIT_LIST_HEAD(&net_schedule_list);
   5.762 -
   5.763 -    netif_ctrlif_init();
   5.764 -
   5.765 -    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
   5.766 -                      netif_be_dbg, SA_SHIRQ, 
   5.767 -                      "net-be-dbg", &netif_be_dbg);
   5.768 -
   5.769 -    return 0;
   5.770 -}
   5.771 -
   5.772 -static void cleanup_module(void)
   5.773 -{
   5.774 -    BUG();
   5.775 -}
   5.776 -
   5.777 -module_init(init_module);
   5.778 -module_exit(cleanup_module);
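
The tx path above tracks in-flight requests with two free-running indices into pending_ring: pending_cons hands out free slots and pending_prod returns them, so NR_PENDING_REQS = MAX_PENDING_REQS - pending_prod + pending_cons counts the slots in use. A small userspace sketch of the arithmetic, initialised the same way as init_module():

    #include <stdio.h>

    #define MAX_PENDING_REQS 256
    #define MASK_PEND_IDX(_i) ((_i) & (MAX_PENDING_REQS - 1))
    #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)

    static unsigned short pending_ring[MAX_PENDING_REQS];
    static unsigned int pending_prod, pending_cons;

    int main(void)
    {
        unsigned short idx;
        int i;

        /* Same initialisation as init_module(): every slot starts free. */
        pending_cons = 0;
        pending_prod = MAX_PENDING_REQS;
        for ( i = 0; i < MAX_PENDING_REQS; i++ )
            pending_ring[i] = i;
        printf("in flight initially: %u\n", (unsigned)NR_PENDING_REQS); /* 0 */

        /* Taking a slot (as net_tx_action() does) raises the count... */
        idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
        printf("took slot %u, in flight: %u\n", idx, (unsigned)NR_PENDING_REQS);

        /* ...and returning it (as the dealloc path does) lowers it again. */
        pending_ring[MASK_PEND_IDX(pending_prod++)] = idx;
        printf("returned it, in flight: %u\n", (unsigned)NR_PENDING_REQS);
        return 0;
    }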
     6.1 --- a/linux-2.4.26-xen-sparse/mkbuildtree	Fri Jul 30 19:57:28 2004 +0000
     6.2 +++ b/linux-2.4.26-xen-sparse/mkbuildtree	Sat Jul 31 16:13:15 2004 +0000
     6.3 @@ -239,3 +239,9 @@ ln -sf ../../../../${LINUX_26}/drivers/x
     6.4  
     6.5  cd ${AD}/arch/xen/drivers/netif/frontend
     6.6  ln -sf ../../../../../${LINUX_26}/drivers/xen/netfront/netfront.c main.c
     6.7 +
     6.8 +cd ${AD}/arch/xen/drivers/netif/backend
     6.9 +ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/common.h
    6.10 +ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/control.c
    6.11 +ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/interface.c
    6.12 +ln -sf ../../../../../${LINUX_26}/drivers/xen/netback/netback.c main.c
     7.1 --- a/linux-2.6.7-xen-sparse/drivers/xen/netback/common.h	Fri Jul 30 19:57:28 2004 +0000
     7.2 +++ b/linux-2.6.7-xen-sparse/drivers/xen/netback/common.h	Sat Jul 31 16:13:15 2004 +0000
     7.3 @@ -17,7 +17,14 @@
     7.4  #include <asm-xen/ctrl_if.h>
     7.5  #include <asm/io.h>
     7.6  #include <asm/pgalloc.h>
     7.7 +
     7.8 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
     7.9  #include <asm-xen/netif.h>
    7.10 +#else
    7.11 +#include "../netif.h"
    7.12 +#define irqreturn_t void
    7.13 +#define IRQ_HANDLED
    7.14 +#endif
    7.15  
    7.16  #if 0
    7.17  #define ASSERT(_p) \
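
For illustration (not part of the changeset), this is what the guard above buys: an interrupt handler written against the 2.6 prototype also compiles on 2.4, where irqreturn_t becomes void and IRQ_HANDLED expands to nothing, reducing the final statement to a bare "return;". The body here is assumed to match the shared netback's netif_be_int(), whose 2.4 version appears in the deleted main.c above:

    static irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
    {
        netif_t *netif = dev_id;

        if ( tx_work_exists(netif) )
        {
            add_to_net_schedule_list_tail(netif);
            maybe_schedule_tx_action();
        }
        return IRQ_HANDLED;   /* "return;" once the 2.4 macros expand */
    }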