win-pvdrivers

diff xennet/xennet_tx.c @ 1007:4cda50fe71d5

merge ndis5 and ndis6 code in xennet
author James Harper <james.harper@bendigoit.com.au>
date Sun Feb 10 23:14:05 2013 +1100 (2013-02-10)
parents 3c7778b9f877
children c21a4feb4a27
line diff
     1.1 --- a/xennet/xennet_tx.c	Sun Feb 10 23:12:03 2013 +1100
     1.2 +++ b/xennet/xennet_tx.c	Sun Feb 10 23:14:05 2013 +1100
     1.3 @@ -1,5 +1,712 @@
     1.4 -#ifdef _GPLPV_NDIS5
     1.5 -#include "xennet5_tx.c"
     1.6 +/*
     1.7 +PV Net Driver for Windows Xen HVM Domains
     1.8 +Copyright (C) 2007 James Harper
     1.9 +Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
    1.10 +
    1.11 +This program is free software; you can redistribute it and/or
    1.12 +modify it under the terms of the GNU General Public License
    1.13 +as published by the Free Software Foundation; either version 2
    1.14 +of the License, or (at your option) any later version.
    1.15 +
    1.16 +This program is distributed in the hope that it will be useful,
    1.17 +but WITHOUT ANY WARRANTY; without even the implied warranty of
    1.18 +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    1.19 +GNU General Public License for more details.
    1.20 +
    1.21 +You should have received a copy of the GNU General Public License
    1.22 +along with this program; if not, write to the Free Software
    1.23 +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
    1.24 +*/
    1.25 +
    1.26 +#include "xennet.h"
    1.27 +
    1.28 +
    1.29 +static USHORT
    1.30 +get_id_from_freelist(struct xennet_info *xi)
    1.31 +{
    1.32 +  NT_ASSERT(xi->tx_id_free);
    1.33 +  xi->tx_id_free--;
    1.34 +
    1.35 +  return xi->tx_id_list[xi->tx_id_free];
    1.36 +}
    1.37 +
    1.38 +static VOID
    1.39 +put_id_on_freelist(struct xennet_info *xi, USHORT id)
    1.40 +{
    1.41 +  xi->tx_id_list[xi->tx_id_free] = id;
    1.42 +  xi->tx_id_free++;
    1.43 +}
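
These two helpers manage tx_id_list as a stack of free shadow ids, with tx_id_free serving as both the count of free ids and the stack pointer. A minimal user-space model of the same structure (RING_SIZE and the helper names are illustrative, not the driver's):

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 256                      /* stand-in for NET_TX_RING_SIZE */

    static unsigned short id_list[RING_SIZE];  /* models xi->tx_id_list */
    static unsigned short id_free;             /* models xi->tx_id_free */

    static unsigned short get_id(void) {
      assert(id_free > 0);      /* caller guarantees ring (and thus id) space */
      return id_list[--id_free];
    }

    static void put_id(unsigned short id) {
      id_list[id_free++] = id;
    }

    int main(void) {
      unsigned short i, id;
      for (i = 0; i < RING_SIZE; i++)
        put_id(i);                             /* mirrors the XenNet_TxInit loop */
      id = get_id();
      printf("got id %d, %d ids left\n", (int)id, (int)id_free);
      put_id(id);
      return 0;
    }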
    1.44 +
    1.45 +#define SWAP_USHORT(x) (USHORT)(((((x) & 0xFF) << 8)|(((x) >> 8) & 0xFF)))
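
SWAP_USHORT reverses the two bytes of a USHORT, converting between little-endian host order and big-endian network order. A stand-alone sanity check (plain C, outside the driver):

    #include <stdio.h>

    #define SWAP_USHORT(x) (unsigned short)((((x) & 0xFF) << 8) | (((x) >> 8) & 0xFF))

    int main(void) {
      unsigned short host = 0x1234;
      printf("0x%04x -> 0x%04x\n", host, SWAP_USHORT(host)); /* 0x1234 -> 0x3412 */
      return 0;
    }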
    1.46 +
    1.47 +static __forceinline struct netif_tx_request *
    1.48 +XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
    1.49 +{
    1.50 +  struct netif_tx_request *tx;
    1.51 +  tx = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
    1.52 +  xi->tx_ring.req_prod_pvt++;
    1.53 +  xi->tx_ring_free--;
    1.54 +  tx->id = get_id_from_freelist(xi);
    1.55 +  NT_ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
    1.56 +  NT_ASSERT(!xi->tx_shadows[tx->id].cb);
    1.57 +  xi->tx_shadows[tx->id].cb = coalesce_buf;
    1.58 +  tx->gref = XnGrantAccess(xi->handle, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
    1.59 +  xi->tx_shadows[tx->id].gref = tx->gref;
    1.60 +  tx->offset = 0;
    1.61 +  tx->size = (USHORT)length;
    1.62 +  NT_ASSERT(tx->offset + tx->size <= PAGE_SIZE);
    1.63 +  NT_ASSERT(tx->size);
    1.64 +  return tx;
    1.65 +}
    1.66 +
         +#if NTDDI_VERSION < NTDDI_VISTA /* NdisQueryPacket and PNDIS_PACKET exist only in the NDIS5 API */
    1.67 +static VOID dump_packet_data(PNDIS_PACKET packet, PCHAR header) {
    1.68 +  UINT mdl_count;
    1.69 +  PMDL first_mdl;
    1.70 +  UINT total_length;
    1.71 +
    1.72 +  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &first_mdl, (PUINT)&total_length);
    1.73 +  FUNCTION_MSG("%s mdl_count = %d, first_mdl = %p, total_length = %d\n", header, mdl_count, first_mdl, total_length);
    1.74 +}
         +#endif
    1.75 +  
    1.76 +/* Called at DISPATCH_LEVEL with tx_lock held */
    1.77 +/*
    1.78 + * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
    1.79 + */
    1.80 +static BOOLEAN
    1.81 +XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
    1.82 +{
    1.83 +  struct netif_tx_request *tx0 = NULL;
    1.84 +  struct netif_tx_request *txN = NULL;
    1.85 +  struct netif_extra_info *ei = NULL;
    1.86 +  ULONG mss = 0;
    1.87 +  #if NTDDI_VERSION < NTDDI_VISTA
    1.88 +  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
    1.89 +  #else
    1.90 +  NDIS_TCP_LARGE_SEND_OFFLOAD_NET_BUFFER_LIST_INFO lso_info;
    1.91 +  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
    1.92 +  #endif
    1.93 +  uint16_t flags = NETTXF_more_data;
    1.94 +  packet_info_t pi;
    1.95 +  BOOLEAN ndis_lso = FALSE;
    1.96 +  BOOLEAN xen_gso = FALSE;
    1.97 +  ULONG remaining;
    1.98 +  ULONG frags = 0;
    1.99 +  BOOLEAN coalesce_required = FALSE;
   1.100 +  PVOID coalesce_buf;
   1.101 +  ULONG coalesce_remaining = 0;
   1.102 +  grant_ref_t gref;
   1.103 +  ULONG tx_length = 0;
   1.104 +  UINT mdl_count;
   1.105 +  
   1.106 +  gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
   1.107 +  if (gref == INVALID_GRANT_REF)
   1.108 +  {
   1.109 +    FUNCTION_MSG("out of grefs\n");
   1.110 +    return FALSE;
   1.111 +  }
   1.112 +  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
   1.113 +  if (!coalesce_buf)
   1.114 +  {
   1.115 +    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
   1.116 +    FUNCTION_MSG("out of memory\n");
   1.117 +    return FALSE;
   1.118 +  }
   1.119 +  XenNet_ClearPacketInfo(&pi);
   1.120 +  #if NTDDI_VERSION < NTDDI_VISTA
   1.121 +  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &pi.first_mdl, (PUINT)&pi.total_length);
   1.122 +  pi.curr_mdl = pi.first_mdl;
   1.123 +  #else
   1.124 +  /* create a new MDL over the data portion of the first MDL in the packet... it's just easier this way */
   1.125 +  IoBuildPartialMdl(packet->CurrentMdl,
   1.126 +    &pi.first_mdl_storage,
   1.127 +    (PUCHAR)MmGetMdlVirtualAddress(packet->CurrentMdl) + packet->CurrentMdlOffset,
   1.128 +    MmGetMdlByteCount(packet->CurrentMdl) - packet->CurrentMdlOffset);
   1.129 +  pi.total_length = packet->DataLength;
   1.130 +  pi.first_mdl_storage.Next = packet->CurrentMdl->Next;
   1.131 +  pi.first_mdl = pi.curr_mdl = &pi.first_mdl_storage;
   1.132 +  #endif
   1.133 +  pi.first_mdl_offset = pi.curr_mdl_offset = 0;
   1.134 +  remaining = min(pi.total_length, PAGE_SIZE);
   1.135 +  while (remaining) { /* this much gets put in the header */
   1.136 +    ULONG length = XenNet_QueryData(&pi, remaining);
   1.137 +    remaining -= length;
   1.138 +    XenNet_EatData(&pi, length);
   1.139 +  }
   1.140 +  frags++;
   1.141 +  if (pi.total_length > PAGE_SIZE) { /* these are the frags we care about */
   1.142 +    remaining = pi.total_length - PAGE_SIZE;
   1.143 +    while (remaining) {
   1.144 +      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
   1.145 +      if (length != 0) {
   1.146 +        frags++;
   1.147 +        if (frags > LINUX_MAX_SG_ELEMENTS)
   1.148 +          break; /* worst case there could be hundreds of fragments - leave the loop now */
   1.149 +      }
   1.150 +      remaining -= length;
   1.151 +      XenNet_EatData(&pi, length);
   1.152 +    }
   1.153 +  }
   1.154 +  if (frags > LINUX_MAX_SG_ELEMENTS) {
   1.155 +    frags = LINUX_MAX_SG_ELEMENTS;
   1.156 +    coalesce_required = TRUE;
   1.157 +  }
   1.158 +
   1.159 +  /* if we have enough space on the ring then we also have enough free ids, so there is no need to check those separately */
   1.160 +  if (xi->tx_ring_free < frags + 1) {
   1.161 +    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
   1.162 +    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
   1.163 +    //FUNCTION_MSG("Full on send - ring full\n");
   1.164 +    return FALSE;
   1.165 +  }
   1.166 +  XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
   1.167 +  remaining = pi.total_length - pi.header_length;
   1.168 +  if (pi.ip_version == 4 && pi.ip_proto == 6 && pi.ip4_length == 0) {
   1.169 +    *((PUSHORT)(pi.header + 0x10)) = GET_NET_USHORT((USHORT)pi.total_length - XN_HDR_SIZE);
   1.170 +  }
   1.171 +
   1.172 +  #if NTDDI_VERSION < NTDDI_VISTA
   1.173 +  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP) {
   1.174 +    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
   1.175 +      packet, TcpIpChecksumPacketInfo);
   1.176 +    if (csum_info->Transmit.NdisPacketChecksumV4) {
   1.177 +      if (csum_info->Transmit.NdisPacketTcpChecksum) {
   1.178 +        flags |= NETTXF_csum_blank | NETTXF_data_validated;
   1.179 +      } else if (csum_info->Transmit.NdisPacketUdpChecksum) {
   1.180 +        flags |= NETTXF_csum_blank | NETTXF_data_validated;
   1.181 +      }
   1.182 +    }
   1.183 +  }
   1.184 +  #else
   1.185 +  csum_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpIpChecksumNetBufferListInfo);
   1.186 +  if (csum_info.Transmit.IsIPv4) {
   1.187 +    if (csum_info.Transmit.TcpChecksum) {
   1.188 +      flags |= NETTXF_csum_blank | NETTXF_data_validated;
   1.189 +    } else if (csum_info.Transmit.UdpChecksum) {
   1.190 +      flags |= NETTXF_csum_blank | NETTXF_data_validated;
   1.191 +    }
   1.192 +  } else if (csum_info.Transmit.IsIPv6) {
   1.193 +    KdPrint((__DRIVER_NAME "     Transmit.IsIPv6 not supported\n"));
   1.194 +  }
   1.195 +  #endif
   1.196 +  
   1.197 +  #if NTDDI_VERSION < NTDDI_VISTA
   1.198 +  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
   1.199 +  #else
   1.200 +  lso_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpLargeSendNetBufferListInfo);
   1.201 +  switch (lso_info.Transmit.Type) {
   1.202 +  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
   1.203 +    mss = lso_info.LsoV1Transmit.MSS;
   1.204 +    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
   1.205 +    break;
   1.206 +  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
   1.207 +    mss = lso_info.LsoV2Transmit.MSS;
   1.208 +    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
   1.209 +    break;
   1.210 +  }
   1.211 +  #endif
   1.212 +  if (mss && pi.parse_result == PARSE_OK) {
   1.213 +    ndis_lso = TRUE;
   1.214 +  }
   1.215 +
   1.216 +  if (ndis_lso) {
   1.217 +    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when LSO is used */
   1.218 +    if (pi.tcp_length >= mss) {
   1.219 +      flags |= NETTXF_extra_info;
   1.220 +      xen_gso = TRUE;
   1.221 +    }
   1.222 +  }
   1.223 +  /*
   1.224 +   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   1.225 +   * (C) rest of requests on the ring. Only (A) has csum flags.
   1.226 +   */
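         +  /*
         +   * For example, an LSO packet with one header slot and three data slots
         +   * occupies five consecutive ring entries:
         +   *   (A) header; csum flags; NETTXF_extra_info | NETTXF_more_data
         +   *   (B) extra_info carrying the GSO type and size (mss)
         +   *   (C) data fragment, NETTXF_more_data
         +   *   (C) data fragment, NETTXF_more_data
         +   *   (C) final data fragment, more_data cleared after the loop
         +   * Slot (A)'s size field is accumulated below until it equals pi.total_length.
         +   */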
   1.227 +
   1.228 +  /* (A) */
   1.229 +  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
   1.230 +  NT_ASSERT(tx0); /* ring space was checked above, so this cannot fail */
   1.231 +  tx0->flags = flags;
   1.232 +  tx_length += pi.header_length;
   1.233 +
   1.234 +  /* lso implies IpHeaderChecksum */
   1.235 +  #if NTDDI_VERSION < NTDDI_VISTA
   1.236 +  if (ndis_lso) {
   1.237 +    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
   1.238 +  }
   1.239 +  #else
   1.240 +  if (ndis_lso || csum_info.Transmit.IpHeaderChecksum) {
   1.241 +    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
   1.242 +  }
   1.243 +  #endif
   1.244 +  txN = tx0;
   1.245 +
   1.246 +  /* (B) */
   1.247 +  if (xen_gso) {
   1.248 +    NT_ASSERT(flags & NETTXF_extra_info);
   1.249 +    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
   1.250 +    //KdPrint((__DRIVER_NAME "     pos = %d\n", xi->tx_ring.req_prod_pvt));
   1.251 +    xi->tx_ring.req_prod_pvt++;
   1.252 +    xi->tx_ring_free--;
   1.253 +    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
   1.254 +    ei->flags = 0;
   1.255 +    ei->u.gso.size = (USHORT)mss;
   1.256 +    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
   1.257 +    ei->u.gso.pad = 0;
   1.258 +    ei->u.gso.features = 0;
   1.259 +  }
   1.260 +
   1.261 +  NT_ASSERT(xi->current_sg_supported || !remaining);
   1.262 +  
   1.263 +  /* (C) - only if data is remaining */
   1.264 +  coalesce_buf = NULL;
   1.265 +  while (remaining > 0) {
   1.266 +    ULONG length;
   1.267 +    PFN_NUMBER pfn;
   1.268 +
   1.269 +    NT_ASSERT(pi.curr_mdl);
   1.270 +    if (coalesce_required) {
   1.271 +      PVOID va;
   1.272 +      if (!coalesce_buf) {
   1.273 +        gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
   1.274 +        if (gref == INVALID_GRANT_REF) {
   1.275 +          KdPrint((__DRIVER_NAME "     out of grefs - partial send\n"));
   1.276 +          break;
   1.277 +        }
   1.278 +        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
   1.279 +        if (!coalesce_buf) {
   1.280 +          XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
   1.281 +          KdPrint((__DRIVER_NAME "     out of memory - partial send\n"));
   1.282 +          break;
   1.283 +        }
   1.284 +        coalesce_remaining = min(PAGE_SIZE, remaining);
   1.285 +      }
   1.286 +      length = XenNet_QueryData(&pi, coalesce_remaining);
   1.287 +      va = NdisBufferVirtualAddressSafe(pi.curr_mdl, LowPagePriority);
   1.288 +      if (!va) {
   1.289 +        KdPrint((__DRIVER_NAME "     failed to map buffer va - partial send\n"));
   1.290 +        coalesce_remaining = 0;
   1.291 +        remaining -= min(PAGE_SIZE, remaining);
   1.292 +        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
   1.293 +      } else {
   1.294 +        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
   1.295 +        coalesce_remaining -= length;
   1.296 +      }
   1.297 +    } else {
   1.298 +      length = XenNet_QueryData(&pi, PAGE_SIZE);
   1.299 +    }
   1.300 +    if (!length || coalesce_remaining) { /* sometimes there are zero length buffers... */
   1.301 +      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
   1.302 +      continue;
   1.303 +    }
   1.304 +
   1.305 +    if (coalesce_buf) {
   1.306 +      if (remaining) {
   1.307 +        txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
   1.308 +        NT_ASSERT(txN);
   1.309 +        coalesce_buf = NULL;
   1.310 +        tx_length += min(PAGE_SIZE, remaining);
   1.311 +        remaining -= min(PAGE_SIZE, remaining);
   1.312 +      }
   1.313 +    } else {
   1.314 +      ULONG offset;
   1.315 +      
   1.316 +      gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
   1.317 +      if (gref == INVALID_GRANT_REF) {
   1.318 +        KdPrint((__DRIVER_NAME "     out of grefs - partial send\n"));
   1.319 +        break;
   1.320 +      }
   1.321 +      txN = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
   1.322 +      xi->tx_ring.req_prod_pvt++;
   1.323 +      xi->tx_ring_free--;
   1.324 +      txN->id = get_id_from_freelist(xi);
   1.325 +      NT_ASSERT(!xi->tx_shadows[txN->id].cb);
   1.326 +      offset = MmGetMdlByteOffset(pi.curr_mdl) + pi.curr_mdl_offset;
   1.327 +      pfn = MmGetMdlPfnArray(pi.curr_mdl)[offset >> PAGE_SHIFT];
   1.328 +      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
   1.329 +      txN->gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
   1.330 +      NT_ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
   1.331 +      xi->tx_shadows[txN->id].gref = txN->gref;
   1.333 +      txN->size = (USHORT)length;
   1.334 +      NT_ASSERT(txN->offset + txN->size <= PAGE_SIZE);
   1.335 +      NT_ASSERT(txN->size);
   1.336 +      NT_ASSERT(txN->gref != INVALID_GRANT_REF);
   1.337 +      remaining -= length;
   1.338 +      tx_length += length;
   1.339 +    }
   1.340 +    tx0->size = tx0->size + txN->size;
   1.341 +    txN->flags = NETTXF_more_data;
   1.342 +    XenNet_EatData(&pi, length);
   1.343 +  }
   1.344 +  txN->flags &= ~NETTXF_more_data;
   1.345 +  NT_ASSERT(tx0->size == pi.total_length);
   1.346 +  NT_ASSERT(!xi->tx_shadows[txN->id].packet);
   1.347 +  xi->tx_shadows[txN->id].packet = packet;
   1.348 +
   1.349 +  #if NTDDI_VERSION < NTDDI_VISTA
   1.350 +  if (ndis_lso) {
   1.351 +    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
   1.352 +  }
   1.353 +  #else
   1.354 +  switch (lso_info.Transmit.Type) {
   1.355 +  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
   1.356 +    lso_info.LsoV1TransmitComplete.TcpPayload = tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length;
         +    /* lso_info is a local copy - write the completion value back to the NBL */
         +    NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpLargeSendNetBufferListInfo) = lso_info.Value;
   1.357 +    break;
   1.358 +  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
   1.359 +    break;
   1.360 +  }
   1.361 +  #endif
   1.362 +
   1.363 +  xi->tx_outstanding++;
   1.364 +  return TRUE;
   1.365 +}
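
The fragment-counting pass near the top of XenNet_HWSendPacket decides between coalescing into a lookaside buffer and granting MDL pages directly. A user-space sketch of the same arithmetic; it assumes payload beyond the first page arrives in whole-page chunks, whereas the real loop walks MDL boundaries and can therefore count more fragments, and MAX_SG stands in for LINUX_MAX_SG_ELEMENTS:

    #include <stdio.h>

    #define PAGE_SZ 4096
    #define MAX_SG  19   /* illustrative; the real limit is LINUX_MAX_SG_ELEMENTS */

    /* The first ring slot carries up to one page of header+data; every further
       chunk of payload needs its own slot unless the packet is coalesced. */
    static int needs_coalesce(unsigned total_length) {
      unsigned frags = 1;
      if (total_length > PAGE_SZ)
        frags += (total_length - PAGE_SZ + PAGE_SZ - 1) / PAGE_SZ;  /* ceil */
      return frags > MAX_SG;
    }

    int main(void) {
      /* contiguous buffers rarely exceed the limit - in practice it is MDL
         fragmentation, not sheer size, that forces coalescing */
      printf("1500: %d, 65535: %d\n", needs_coalesce(1500), needs_coalesce(65535));
      return 0;
    }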
   1.366 +
   1.367 +/* Called at DISPATCH_LEVEL with tx_lock held */
   1.368 +static VOID
   1.369 +XenNet_SendQueuedPackets(struct xennet_info *xi)
   1.370 +{
   1.371 +  PLIST_ENTRY entry;
   1.372 +  #if NTDDI_VERSION < NTDDI_VISTA
   1.373 +  PNDIS_PACKET packet;
   1.374 +  #else
   1.375 +  PNET_BUFFER packet;
   1.376 +  #endif
   1.377 +  int notify;
   1.378 +
   1.379 +  if (xi->device_state != DEVICE_STATE_ACTIVE)
   1.380 +    return;
   1.381 +
   1.382 +  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
   1.383 +    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
   1.384 +    #if NTDDI_VERSION < NTDDI_VISTA
   1.385 +    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
   1.386 +    #else
   1.387 +    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
   1.388 +    #endif    
   1.389 +    if (!XenNet_HWSendPacket(xi, packet)) {
   1.390 +      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
   1.391 +      break;
   1.392 +    }
   1.393 +  }
   1.394 +
   1.395 +  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx_ring, notify);
   1.396 +  if (notify) {
   1.397 +    XnNotify(xi->handle, xi->event_channel);
   1.398 +  }
   1.399 +}
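
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY publishes req_prod_pvt to the shared ring and reports whether the backend asked to be notified about any newly pushed request. A portable sketch of that check, modeled on the canonical macro in Xen's public io/ring.h (memory barriers omitted):

    #include <stdio.h>

    typedef unsigned int RING_IDX;

    /* Notify only if the peer's req_event falls inside (old, new]; the
       unsigned subtractions make the comparison wraparound-safe. */
    static int push_requests_and_check_notify(RING_IDX *shared_req_prod,
                                              RING_IDX req_prod_pvt,
                                              RING_IDX req_event) {
      RING_IDX old = *shared_req_prod;
      *shared_req_prod = req_prod_pvt;  /* the real macro issues barriers here */
      return (RING_IDX)(req_prod_pvt - req_event) < (RING_IDX)(req_prod_pvt - old);
    }

    int main(void) {
      RING_IDX prod = 10;
      /* backend set req_event = 11: it wants a kick once request 11 is pushed */
      printf("notify=%d\n", push_requests_and_check_notify(&prod, 12, 11));
      return 0;
    }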
   1.400 +
   1.401 +// Called at DISPATCH_LEVEL
   1.402 +VOID
   1.403 +XenNet_TxBufferGC(struct xennet_info *xi, BOOLEAN dont_set_event) {
   1.404 +  RING_IDX cons, prod;
   1.405 +  #if NTDDI_VERSION < NTDDI_VISTA
   1.406 +  PNDIS_PACKET head = NULL, tail = NULL;
   1.407 +  PNDIS_PACKET packet;
   1.408 +  #else
   1.409 +  PNET_BUFFER_LIST nbl_head = NULL;
   1.410 +  PNET_BUFFER_LIST nbl_tail = NULL;
   1.411 +  PNET_BUFFER_LIST nbl;
   1.412 +  PNET_BUFFER packet;
   1.413 +  #endif
   1.414 +  ULONG tx_packets = 0;
   1.415 +
   1.416 +  NT_ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
   1.417 +
   1.418 +  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
   1.419 +
   1.420 +  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
   1.421 +    /* there is a chance that our Dpc had been queued just before the shutdown... */
   1.422 +    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
   1.423 +    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
   1.424 +    return;
   1.425 +  }
   1.426 +
   1.427 +  do {
   1.428 +    prod = xi->tx_ring.sring->rsp_prod;
   1.429 +    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */
   1.430 +
   1.431 +    for (cons = xi->tx_ring.rsp_cons; cons != prod; cons++)
   1.432 +    {
   1.433 +      struct netif_tx_response *txrsp;
   1.434 +      tx_shadow_t *shadow;
   1.435 +      
   1.436 +      txrsp = RING_GET_RESPONSE(&xi->tx_ring, cons);
   1.437 +      
   1.438 +      xi->tx_ring_free++;
   1.439 +      
   1.440 +      if (txrsp->status == NETIF_RSP_NULL) {
   1.441 +        continue;
   1.442 +      }
   1.443 +
   1.444 +      shadow = &xi->tx_shadows[txrsp->id];
   1.445 +      if (shadow->cb) {
   1.446 +        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
   1.447 +        shadow->cb = NULL;
   1.448 +      }
   1.449 +      
   1.450 +      if (shadow->gref != INVALID_GRANT_REF) {
   1.451 +        XnEndAccess(xi->handle, shadow->gref, FALSE, (ULONG)'XNTX');
   1.452 +        shadow->gref = INVALID_GRANT_REF;
   1.453 +      }
   1.454 +      
   1.455 +      if (shadow->packet) {
   1.456 +        PMDL mdl;
   1.457 +        PUCHAR header;
   1.458 +        packet = shadow->packet;
   1.459 +        #if NTDDI_VERSION < NTDDI_VISTA
   1.460 +        mdl = NDIS_PACKET_FIRST_NDIS_BUFFER(packet);
   1.461 +        #else
   1.462 +        mdl = NET_BUFFER_CURRENT_MDL(packet);
   1.463 +        #endif
   1.464 +        #pragma warning(suppress:28193) /* already mapped so guaranteed to work */
   1.465 +        header = MmGetSystemAddressForMdlSafe(mdl, LowPagePriority);
   1.466 +        #if NTDDI_VERSION < NTDDI_VISTA
   1.467 +        #else
   1.468 +        header += NET_BUFFER_CURRENT_MDL_OFFSET(packet);
   1.469 +        #endif
   1.470 +
   1.471 +        #if NTDDI_VERSION < NTDDI_VISTA
   1.472 +        #else
   1.473 +        xi->stats.ifHCOutOctets += packet->DataLength;
   1.474 +        if (packet->DataLength < XN_HDR_SIZE || !(header[0] & 0x01)) {
   1.475 +          /* unicast or tiny packet */
   1.476 +          xi->stats.ifHCOutUcastPkts++;
   1.477 +          xi->stats.ifHCOutUcastOctets += packet->DataLength;
   1.478 +        }
   1.479 +        else if (header[0] == 0xFF && header[1] == 0xFF && header[2] == 0xFF
   1.480 +                 && header[3] == 0xFF && header[4] == 0xFF && header[5] == 0xFF) {
   1.481 +          /* broadcast */
   1.482 +          xi->stats.ifHCOutBroadcastPkts++;
   1.483 +          xi->stats.ifHCOutBroadcastOctets += packet->DataLength;
   1.484 +        } else {
   1.485 +          /* multicast */
   1.486 +          xi->stats.ifHCOutMulticastPkts++;
   1.487 +          xi->stats.ifHCOutMulticastOctets += packet->DataLength;
   1.488 +        }
   1.489 +        #endif
   1.490 +        
   1.491 +        #if NTDDI_VERSION < NTDDI_VISTA
   1.492 +        PACKET_NEXT_PACKET(packet) = NULL;
   1.493 +        if (!head) {
   1.494 +          head = packet;
   1.495 +        } else {
   1.496 +          PACKET_NEXT_PACKET(tail) = packet;
   1.497 +        }
   1.498 +        tail = packet;
   1.499 +        #else
   1.500 +        nbl = NB_NBL(packet);
   1.501 +        NBL_REF(nbl)--;
   1.502 +        if (!NBL_REF(nbl)) {
   1.503 +          NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
   1.504 +          if (nbl_head) {
   1.505 +            NET_BUFFER_LIST_NEXT_NBL(nbl_tail) = nbl;
   1.506 +            nbl_tail = nbl;
   1.507 +          } else {
   1.508 +            nbl_head = nbl;
   1.509 +            nbl_tail = nbl;
   1.510 +          }
   1.511 +        }
   1.512 +        #endif
   1.513 +        shadow->packet = NULL;
   1.514 +        tx_packets++;
   1.515 +      }
   1.516 +      put_id_on_freelist(xi, txrsp->id);
   1.517 +    }
   1.518 +
   1.519 +    xi->tx_ring.rsp_cons = prod;
   1.520 +    /* resist the temptation to set the event more than +1... it breaks things */
   1.521 +    if (!dont_set_event)
   1.522 +      xi->tx_ring.sring->rsp_event = prod + 1;
   1.523 +    KeMemoryBarrier();
   1.524 +  } while (prod != xi->tx_ring.sring->rsp_prod);
   1.525 +
   1.526 +  /* if queued packets, send them now */
   1.527 +  XenNet_SendQueuedPackets(xi);
   1.528 +
   1.529 +  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
   1.530 +
   1.531 +  /* must be done without holding any locks */
   1.532 +  #if NTDDI_VERSION < NTDDI_VISTA
   1.533 +  while (head) {
   1.534 +    packet = (PNDIS_PACKET)head;
   1.535 +    head = PACKET_NEXT_PACKET(packet);
   1.536 +    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
   1.537 +  }
   1.538 +  #else
   1.539 +  if (nbl_head)
   1.540 +    NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl_head, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
   1.541 +  #endif
   1.542 +
   1.543 +  /* must be done after we have truly given back all packets */
   1.544 +  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
   1.545 +  xi->tx_outstanding -= tx_packets;
   1.546 +  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
   1.547 +    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
   1.548 +  }
   1.549 +  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
   1.550 +}
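
Under NDIS6 each NET_BUFFER of a NET_BUFFER_LIST is queued and retired individually, so the driver keeps a per-NBL reference count (the NBL_REF macro) and hands the NBL back to NDIS only when its last NET_BUFFER completes. The same pattern in plain C (nbl_model and retire_net_buffer are illustrative names):

    #include <assert.h>
    #include <stdio.h>

    struct nbl_model { int ref; int completed; };

    /* Called once per retired NET_BUFFER; completes the list on the last one
       (standing in for NdisMSendNetBufferListsComplete). */
    static void retire_net_buffer(struct nbl_model *nbl) {
      assert(nbl->ref > 0);
      if (--nbl->ref == 0)
        nbl->completed = 1;
    }

    int main(void) {
      struct nbl_model nbl = { 3, 0 };  /* three NET_BUFFERs queued from one NBL */
      retire_net_buffer(&nbl);
      retire_net_buffer(&nbl);
      retire_net_buffer(&nbl);
      printf("completed=%d\n", nbl.completed);
      return 0;
    }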
   1.551 +
   1.552 +#if NTDDI_VERSION < NTDDI_VISTA
   1.553 +VOID
   1.554 +XenNet_SendPackets(NDIS_HANDLE MiniportAdapterContext, PPNDIS_PACKET PacketArray, UINT NumberOfPackets) {
   1.555 +  struct xennet_info *xi = MiniportAdapterContext;
   1.556 +  PNDIS_PACKET packet;
   1.557 +  UINT i;
   1.558 +  PLIST_ENTRY entry;
   1.559 +  KIRQL old_irql;
   1.560 +
   1.561 +  if (xi->device_state != DEVICE_STATE_ACTIVE) {
   1.562 +    for (i = 0; i < NumberOfPackets; i++) {
   1.563 +      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
   1.564 +    }
   1.565 +    return;
   1.566 +  }
   1.567 +
   1.568 +  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
   1.569 +
   1.570 +  for (i = 0; i < NumberOfPackets; i++) {
   1.571 +    packet = PacketArray[i];
   1.572 +    ASSERT(packet);
   1.573 +    entry = &PACKET_LIST_ENTRY(packet);
   1.574 +    InsertTailList(&xi->tx_waiting_pkt_list, entry);
   1.575 +  }
   1.576 +
   1.577 +  XenNet_SendQueuedPackets(xi);
   1.578 +
   1.579 +  KeReleaseSpinLock(&xi->tx_lock, old_irql);
   1.580 +}
   1.581  #else
   1.582 -#include "xennet6_tx.c"
   1.583 +// called at <= DISPATCH_LEVEL
   1.584 +VOID
   1.585 +XenNet_SendNetBufferLists(
   1.586 +    NDIS_HANDLE adapter_context,
   1.587 +    PNET_BUFFER_LIST nb_lists,
   1.588 +    NDIS_PORT_NUMBER port_number,
   1.589 +    ULONG send_flags) {
   1.590 +  struct xennet_info *xi = adapter_context;
   1.591 +  PLIST_ENTRY nb_entry;
   1.592 +  KIRQL old_irql;
   1.593 +  PNET_BUFFER_LIST curr_nbl;
   1.594 +  PNET_BUFFER_LIST next_nbl;
   1.595 +
   1.596 +  UNREFERENCED_PARAMETER(port_number);
   1.597 +
   1.598 +  if (xi->device_state == DEVICE_STATE_INACTIVE) {
   1.600 +    for (curr_nbl = nb_lists; curr_nbl; curr_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl)) {
   1.601 +      curr_nbl->Status = NDIS_STATUS_FAILURE;
   1.602 +    }
   1.603 +    /* this actions the whole list */
   1.604 +    NdisMSendNetBufferListsComplete(xi->adapter_handle, nb_lists, (send_flags & NDIS_SEND_FLAGS_DISPATCH_LEVEL)?NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL:0);
   1.605 +    return;
   1.606 +  }
   1.607 +
   1.608 +  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
   1.609 +  
   1.610 +  for (curr_nbl = nb_lists; curr_nbl; curr_nbl = next_nbl) {
   1.611 +    PNET_BUFFER curr_nb;
   1.612 +    NBL_REF(curr_nbl) = 0;
   1.613 +    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
   1.614 +    NET_BUFFER_LIST_NEXT_NBL(curr_nbl) = NULL;
   1.615 +    for (curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl); curr_nb; curr_nb = NET_BUFFER_NEXT_NB(curr_nb)) {
   1.616 +      NB_NBL(curr_nb) = curr_nbl;
   1.617 +      nb_entry = &NB_LIST_ENTRY(curr_nb);
   1.618 +      InsertTailList(&xi->tx_waiting_pkt_list, nb_entry);
   1.619 +      NBL_REF(curr_nbl)++;
   1.620 +    }
   1.621 +  }
   1.622 +
   1.623 +  XenNet_SendQueuedPackets(xi);
   1.624 +
   1.625 +  KeReleaseSpinLock(&xi->tx_lock, old_irql);
   1.626 +}
   1.627  #endif
   1.628 +
   1.629 +#if 0
   1.630 +VOID
   1.631 +XenNet_CancelSend(NDIS_HANDLE adapter_context, PVOID cancel_id)
   1.632 +{
   1.633 +  UNREFERENCED_PARAMETER(adapter_context);
   1.634 +  UNREFERENCED_PARAMETER(cancel_id);
   1.635 +  FUNCTION_ENTER();
   1.636 +    
   1.637 +  FUNCTION_EXIT();
   1.638 +}
   1.639 +#endif
   1.640 +
   1.641 +BOOLEAN
   1.642 +XenNet_TxInit(xennet_info_t *xi) {
   1.643 +  USHORT i;
   1.645 +  
   1.646 +  KeInitializeSpinLock(&xi->tx_lock);
   1.647 +  InitializeListHead(&xi->tx_waiting_pkt_list);
   1.648 +
   1.649 +  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
   1.650 +  xi->tx_outstanding = 0;
   1.651 +  xi->tx_ring_free = NET_TX_RING_SIZE;
   1.652 +  
   1.653 +  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
   1.654 +    PAGE_SIZE, XENNET_POOL_TAG, 0);
   1.655 +
   1.656 +  xi->tx_id_free = 0;
   1.657 +  for (i = 0; i < NET_TX_RING_SIZE; i++) {
   1.658 +    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
   1.659 +    xi->tx_shadows[i].cb = NULL;
   1.660 +    put_id_on_freelist(xi, i);
   1.661 +  }
   1.662 +
   1.663 +  return TRUE;
   1.664 +}
   1.665 +
   1.666 +/*
   1.667 +The ring is completely closed down now. We just need to empty anything left
   1.668 +on our freelists and harvest anything left on the rings.
   1.669 +*/
   1.670 +
   1.671 +BOOLEAN
   1.672 +XenNet_TxShutdown(xennet_info_t *xi) {
   1.673 +  #if NTDDI_VERSION < NTDDI_VISTA
   1.674 +  PNDIS_PACKET packet;
   1.675 +  #else
   1.676 +  PNET_BUFFER packet;
   1.677 +  PNET_BUFFER_LIST nbl;
   1.678 +  #endif
   1.679 +  PLIST_ENTRY entry;
   1.680 +  LARGE_INTEGER timeout;
   1.681 +  KIRQL old_irql;
   1.682 +
   1.683 +  FUNCTION_ENTER();
   1.684 +
   1.685 +  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
   1.686 +
   1.687 +  while (xi->tx_outstanding) {
   1.688 +    KeReleaseSpinLock(&xi->tx_lock, old_irql);
   1.689 +    KdPrint((__DRIVER_NAME "     Waiting for %d remaining packets to be sent\n", xi->tx_outstanding));
   1.690 +    timeout.QuadPart = -1 * 1 * 1000 * 1000 * 10; /* 1 second */
   1.691 +    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, &timeout);
   1.692 +    KeAcquireSpinLock(&xi->tx_lock, &old_irql);
   1.693 +  }
   1.694 +  KeReleaseSpinLock(&xi->tx_lock, old_irql);
   1.695 +
   1.696 +  /* Free packets in tx queue */
   1.697 +  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
   1.698 +    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
   1.699 +    #if NTDDI_VERSION < NTDDI_VISTA
   1.700 +    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
   1.701 +    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
   1.703 +    #else
   1.704 +    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
   1.705 +    nbl = NB_NBL(packet);
   1.706 +    NBL_REF(nbl)--;
   1.707 +    if (!NBL_REF(nbl)) {
   1.708 +      nbl->Status = NDIS_STATUS_FAILURE;
   1.709 +      NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
   1.710 +    }
   1.711 +    #endif
   1.712 +  }
   1.713 +  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);
   1.714 +
   1.715 +  FUNCTION_EXIT();
   1.716 +
   1.717 +  return TRUE;
   1.718 +}
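
The timeout constant in XenNet_TxShutdown follows the NT convention that relative waits are expressed as negative counts of 100 ns units, so -1 * 1 * 1000 * 1000 * 10 is one second. Spelled out as plain C:

    #include <stdio.h>

    /* Relative NT timeouts: negative, in units of 100 ns. */
    static long long relative_timeout_ms(long long ms) {
      return -(ms * 1000 /* -> us */ * 10 /* -> 100 ns ticks */);
    }

    int main(void) {
      printf("%lld\n", relative_timeout_ms(1000));  /* -10000000 == 1 second */
      return 0;
    }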