win-pvdrivers

annotate xennet/xennet_tx.c @ 1023:1ce315b193d1

Change all NT_ASSERT to XN_ASSERT
author James Harper <james.harper@bendigoit.com.au>
date Tue Feb 19 15:12:35 2013 +1100 (2013-02-19)
parents c21a4feb4a27
children 00d29add6a2a
rev   line source
james@1007 1 /*
james@1007 2 PV Net Driver for Windows Xen HVM Domains
james@1007 3 Copyright (C) 2007 James Harper
james@1007 4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
james@1007 5
james@1007 6 This program is free software; you can redistribute it and/or
james@1007 7 modify it under the terms of the GNU General Public License
james@1007 8 as published by the Free Software Foundation; either version 2
james@1007 9 of the License, or (at your option) any later version.
james@1007 10
james@1007 11 This program is distributed in the hope that it will be useful,
james@1007 12 but WITHOUT ANY WARRANTY; without even the implied warranty of
james@1007 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
james@1007 14 GNU General Public License for more details.
james@1007 15
james@1007 16 You should have received a copy of the GNU General Public License
james@1007 17 along with this program; if not, write to the Free Software
james@1007 18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
james@1007 19 */
james@1007 20
james@1007 21 #include "xennet.h"
james@1007 22
james@1007 23
james@1007 24 static USHORT
james@1007 25 get_id_from_freelist(struct xennet_info *xi)
james@1007 26 {
james@1023 27 XN_ASSERT(xi->tx_id_free);
james@1007 28 xi->tx_id_free--;
james@1007 29
james@1007 30 return xi->tx_id_list[xi->tx_id_free];
james@1007 31 }
james@1007 32
james@1007 33 static VOID
james@1007 34 put_id_on_freelist(struct xennet_info *xi, USHORT id)
james@1007 35 {
james@1007 36 xi->tx_id_list[xi->tx_id_free] = id;
james@1007 37 xi->tx_id_free++;
james@1007 38 }
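/* The tx id freelist is a LIFO stack: tx_id_list[0 .. tx_id_free-1] holds the unused
 * request ids, and each id is also the index of its tx_shadows[] entry. XenNet_TxInit
 * pushes all NET_TX_RING_SIZE ids onto it. */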
james@1007 39
james@1007 40 #define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))
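/* Swaps the byte order of a 16 bit value. Note that 'x' is not parenthesised inside the
 * macro, so only simple expressions should be passed to it. */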
james@1007 41
james@1007 42 static __forceinline struct netif_tx_request *
james@1007 43 XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
james@1007 44 {
james@1007 45 struct netif_tx_request *tx;
james@1007 46 tx = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
james@1007 47 xi->tx_ring.req_prod_pvt++;
james@1007 48 xi->tx_ring_free--;
james@1007 49 tx->id = get_id_from_freelist(xi);
james@1023 50 XN_ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
james@1023 51 XN_ASSERT(!xi->tx_shadows[tx->id].cb);
james@1007 52 xi->tx_shadows[tx->id].cb = coalesce_buf;
james@1007 53 tx->gref = XnGrantAccess(xi->handle, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
james@1007 54 xi->tx_shadows[tx->id].gref = tx->gref;
james@1007 55 tx->offset = 0;
james@1007 56 tx->size = (USHORT)length;
james@1023 57 XN_ASSERT(tx->offset + tx->size <= PAGE_SIZE);
james@1023 58 XN_ASSERT(tx->size);
james@1007 59 return tx;
james@1007 60 }
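/* Claims the next ring slot and a free id, grants the backend access to the page backing
 * coalesce_buf, and records both the buffer and the grant ref in tx_shadows[id] so that
 * XenNet_TxBufferGC can release them when the response arrives. */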
james@1007 61
james@1011 62 #if 0
james@1007 63 static VOID dump_packet_data(PNDIS_PACKET packet, PCHAR header) {
james@1007 64 UINT mdl_count;
james@1007 65 PMDL first_mdl;
james@1007 66 UINT total_length;
james@1007 67
james@1007 68 NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &first_mdl, (PUINT)&total_length);
james@1007 69 FUNCTION_MSG("%s mdl_count = %d, first_mdl = %p, total_length = %d\n", header, mdl_count, first_mdl, total_length);
james@1007 70 }
james@1011 71 #endif
james@1007 72
james@1007 73 /* Called at DISPATCH_LEVEL with tx_lock held */
james@1007 74 /*
james@1007 75 * Send one NDIS_PACKET (NDIS5) or NET_BUFFER (NDIS6). This may involve multiple entries on the TX ring.
james@1007 76 */
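/* Rough flow: allocate a grant ref and a PAGE_SIZE coalesce buffer for the header, count the
 * fragments the packet needs (setting coalesce_required if it exceeds LINUX_MAX_SG_ELEMENTS),
 * return FALSE so the caller re-queues the packet if the ring cannot hold them all, then place
 * (A) the header request, (B) an optional GSO extra_info slot and (C) one request per remaining
 * fragment. The packet itself is recorded against the last request's shadow entry, so it is
 * only completed once every fragment's response has come back. */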
james@1011 77 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 78 static BOOLEAN
james@1011 79 XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet) {
james@1011 80 #else
james@1011 81 static BOOLEAN
james@1011 82 XenNet_HWSendPacket(struct xennet_info *xi, PNET_BUFFER packet) {
james@1011 83 #endif
james@1007 84 struct netif_tx_request *tx0 = NULL;
james@1007 85 struct netif_tx_request *txN = NULL;
james@1007 86 struct netif_extra_info *ei = NULL;
james@1007 87 ULONG mss = 0;
james@1007 88 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 89 PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
james@1011 90 UINT mdl_count;
james@1007 91 #else
james@1007 92 NDIS_TCP_LARGE_SEND_OFFLOAD_NET_BUFFER_LIST_INFO lso_info;
james@1007 93 NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
james@1007 94 #endif
james@1007 95 uint16_t flags = NETTXF_more_data;
james@1007 96 packet_info_t pi;
james@1007 97 BOOLEAN ndis_lso = FALSE;
james@1007 98 BOOLEAN xen_gso = FALSE;
james@1007 99 ULONG remaining;
james@1007 100 ULONG frags = 0;
james@1007 101 BOOLEAN coalesce_required = FALSE;
james@1007 102 PVOID coalesce_buf;
james@1007 103 ULONG coalesce_remaining = 0;
james@1007 104 grant_ref_t gref;
james@1007 105 ULONG tx_length = 0;
james@1007 106
james@1007 107 gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
james@1007 108 if (gref == INVALID_GRANT_REF)
james@1007 109 {
james@1007 110 FUNCTION_MSG("out of grefs\n");
james@1007 111 return FALSE;
james@1007 112 }
james@1007 113 coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
james@1007 114 if (!coalesce_buf)
james@1007 115 {
james@1007 116 XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
james@1007 117 FUNCTION_MSG("out of memory\n");
james@1007 118 return FALSE;
james@1007 119 }
james@1007 120 XenNet_ClearPacketInfo(&pi);
james@1007 121 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 122 NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &pi.first_mdl, (PUINT)&pi.total_length);
james@1007 123 pi.curr_mdl = pi.first_mdl;
james@1007 124 #else
james@1007 125 /* create a new MDL over the data portion of the first MDL in the packet... it's just easier this way */
james@1007 126 IoBuildPartialMdl(packet->CurrentMdl,
james@1007 127 &pi.first_mdl_storage,
james@1011 128 (PUCHAR)MmGetMdlVirtualAddress(packet->CurrentMdl) + packet->CurrentMdlOffset,
james@1011 129 MmGetMdlByteCount(packet->CurrentMdl) - packet->CurrentMdlOffset);
james@1011 130 pi.total_length = packet->DataLength;
james@1011 131 pi.first_mdl_storage.Next = packet->CurrentMdl->Next;
james@1007 132 pi.first_mdl = pi.curr_mdl = &pi.first_mdl_storage;
james@1007 133 #endif
james@1007 134 pi.first_mdl_offset = pi.curr_mdl_offset = 0;
james@1007 135 remaining = min(pi.total_length, PAGE_SIZE);
james@1007 136 while (remaining) { /* this much gets put in the header */
james@1007 137 ULONG length = XenNet_QueryData(&pi, remaining);
james@1007 138 remaining -= length;
james@1007 139 XenNet_EatData(&pi, length);
james@1007 140 }
james@1007 141 frags++;
james@1007 142 if (pi.total_length > PAGE_SIZE) { /* these are the frags we care about */
james@1007 143 remaining = pi.total_length - PAGE_SIZE;
james@1007 144 while (remaining) {
james@1007 145 ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
james@1007 146 if (length != 0) {
james@1007 147 frags++;
james@1007 148 if (frags > LINUX_MAX_SG_ELEMENTS)
james@1007 149 break; /* worst case there could be hundreds of fragments - leave the loop now */
james@1007 150 }
james@1007 151 remaining -= length;
james@1007 152 XenNet_EatData(&pi, length);
james@1007 153 }
james@1007 154 }
james@1007 155 if (frags > LINUX_MAX_SG_ELEMENTS) {
james@1007 156 frags = LINUX_MAX_SG_ELEMENTS;
james@1007 157 coalesce_required = TRUE;
james@1007 158 }
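/* When frags exceeds LINUX_MAX_SG_ELEMENTS, coalesce_required makes the fragment loop below
 * copy payload into PAGE_SIZE coalesce buffers so that no more than LINUX_MAX_SG_ELEMENTS
 * requests are placed on the ring. */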
james@1007 159
james@1007 160 /* if we have enough space on the ring then we have enough ids (extra_info slots use ring space but no id), so there is no need to check the id freelist separately */
james@1007 161 if (xi->tx_ring_free < frags + 1) {
james@1007 162 XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
james@1007 163 NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
james@1007 164 //FUNCTION_MSG("Full on send - ring full\n");
james@1007 165 return FALSE;
james@1007 166 }
james@1007 167 XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
james@1007 168 remaining = pi.total_length - pi.header_length;
james@1007 169 if (pi.ip_version == 4 && pi.ip_proto == 6 && pi.ip4_length == 0) {
james@1007 170 *((PUSHORT)(pi.header + 0x10)) = GET_NET_USHORT((USHORT)pi.total_length - XN_HDR_SIZE);
james@1007 171 }
james@1007 172
james@1007 173 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 174 if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP) {
james@1007 175 csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
james@1007 176 packet, TcpIpChecksumPacketInfo);
james@1007 177 if (csum_info->Transmit.NdisPacketChecksumV4) {
james@1007 178 if (csum_info->Transmit.NdisPacketTcpChecksum) {
james@1007 179 flags |= NETTXF_csum_blank | NETTXF_data_validated;
james@1007 180 } else if (csum_info->Transmit.NdisPacketUdpChecksum) {
james@1007 181 flags |= NETTXF_csum_blank | NETTXF_data_validated;
james@1007 182 }
james@1007 183 }
james@1007 184 }
james@1007 185 #else
james@1011 186 csum_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpIpChecksumNetBufferListInfo);
james@1007 187 if (csum_info.Transmit.IsIPv4) {
james@1007 188 if (csum_info.Transmit.TcpChecksum) {
james@1007 189 flags |= NETTXF_csum_blank | NETTXF_data_validated;
james@1007 190 } else if (csum_info.Transmit.UdpChecksum) {
james@1007 191 flags |= NETTXF_csum_blank | NETTXF_data_validated;
james@1007 192 }
james@1007 193 } else if (csum_info.Transmit.IsIPv6) {
james@1007 194 KdPrint((__DRIVER_NAME " Transmit.IsIPv6 not supported\n"));
james@1007 195 }
james@1007 196 #endif
james@1007 197
james@1007 198 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 199 mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
james@1007 200 #else
james@1011 201 lso_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpLargeSendNetBufferListInfo);
james@1007 202 switch (lso_info.Transmit.Type) {
james@1007 203 case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
james@1007 204 mss = lso_info.LsoV1Transmit.MSS;
james@1007 205 /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
james@1007 206 break;
james@1007 207 case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
james@1007 208 mss = lso_info.LsoV2Transmit.MSS;
james@1007 209 /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
james@1007 210 break;
james@1007 211 }
james@1007 212 #endif
james@1007 213 if (mss && pi.parse_result == PARSE_OK) {
james@1007 214 ndis_lso = TRUE;
james@1007 215 }
james@1007 216
james@1007 217 if (ndis_lso) {
james@1007 218 flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used*/
james@1007 219 if (pi.tcp_length >= mss) {
james@1007 220 flags |= NETTXF_extra_info;
james@1007 221 xen_gso = TRUE;
james@1007 222 }
james@1007 223 }
james@1007 224 /*
james@1007 225 * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
james@1007 226 * (C) rest of requests on the ring. Only (A) has csum flags.
james@1007 227 */
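/* For a GSO send the ring ends up looking like:
 *   tx0 - header bytes from the coalesce buffer, checksum flags, NETTXF_more_data | NETTXF_extra_info
 *   ei  - XEN_NETIF_EXTRA_TYPE_GSO slot carrying the MSS
 *   txN - one request per remaining fragment, NETTXF_more_data on all but the last
 * tx0->size accumulates the later fragment sizes, so it ends up equal to pi.total_length. */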
james@1007 228
james@1007 229 /* (A) */
james@1007 230 tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
james@1023 231 XN_ASSERT(tx0); /* this will never happen */
james@1007 232 tx0->flags = flags;
james@1007 233 tx_length += pi.header_length;
james@1007 234
james@1007 235 /* lso implies IpHeaderChecksum */
james@1007 236 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 237 if (ndis_lso) {
james@1007 238 XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
james@1007 239 }
james@1007 240 #else
james@1007 241 if (ndis_lso || csum_info.Transmit.IpHeaderChecksum) {
james@1007 242 XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
james@1007 243 }
james@1007 244 #endif
james@1007 245 txN = tx0;
james@1007 246
james@1007 247 /* (B) */
james@1007 248 if (xen_gso) {
james@1023 249 XN_ASSERT(flags & NETTXF_extra_info);
james@1007 250 ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
james@1007 251 //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx_ring.req_prod_pvt));
james@1007 252 xi->tx_ring.req_prod_pvt++;
james@1007 253 xi->tx_ring_free--;
james@1007 254 ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
james@1007 255 ei->flags = 0;
james@1007 256 ei->u.gso.size = (USHORT)mss;
james@1007 257 ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
james@1007 258 ei->u.gso.pad = 0;
james@1007 259 ei->u.gso.features = 0;
james@1007 260 }
james@1007 261
james@1023 262 XN_ASSERT(xi->current_sg_supported || !remaining);
james@1007 263
james@1007 264 /* (C) - only if data is remaining */
james@1007 265 coalesce_buf = NULL;
james@1007 266 while (remaining > 0) {
james@1007 267 ULONG length;
james@1007 268 PFN_NUMBER pfn;
james@1007 269
james@1023 270 XN_ASSERT(pi.curr_mdl);
james@1007 271 if (coalesce_required) {
james@1007 272 PVOID va;
james@1007 273 if (!coalesce_buf) {
james@1007 274 gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
james@1007 275 if (gref == INVALID_GRANT_REF) {
james@1007 276 KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
james@1007 277 break;
james@1007 278 }
james@1007 279 coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
james@1007 280 if (!coalesce_buf) {
james@1007 281 XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
james@1007 282 KdPrint((__DRIVER_NAME " out of memory - partial send\n"));
james@1007 283 break;
james@1007 284 }
james@1007 285 coalesce_remaining = min(PAGE_SIZE, remaining);
james@1007 286 }
james@1007 287 length = XenNet_QueryData(&pi, coalesce_remaining);
james@1007 288 va = NdisBufferVirtualAddressSafe(pi.curr_mdl, LowPagePriority);
james@1007 289 if (!va) {
james@1007 290 KdPrint((__DRIVER_NAME " failed to map buffer va - partial send\n"));
james@1007 291 coalesce_remaining = 0;
james@1007 292 remaining -= min(PAGE_SIZE, remaining);
james@1007 293 NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
james@1007 294 } else {
james@1007 295 memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
james@1007 296 coalesce_remaining -= length;
james@1007 297 }
james@1007 298 } else {
james@1007 299 length = XenNet_QueryData(&pi, PAGE_SIZE);
james@1007 300 }
james@1007 301 if (!length || coalesce_remaining) { /* sometimes there are zero length buffers... */
james@1007 302 XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
james@1007 303 continue;
james@1007 304 }
james@1007 305
james@1007 306 if (coalesce_buf) {
james@1007 307 if (remaining) {
james@1007 308 txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
james@1023 309 XN_ASSERT(txN);
james@1007 310 coalesce_buf = NULL;
james@1007 311 tx_length += min(PAGE_SIZE, remaining);
james@1007 312 remaining -= min(PAGE_SIZE, remaining);
james@1007 313 }
james@1007 314 } else {
james@1007 315 ULONG offset;
james@1007 316
james@1007 317 gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
james@1007 318 if (gref == INVALID_GRANT_REF) {
james@1007 319 KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
james@1007 320 break;
james@1007 321 }
james@1007 322 txN = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
james@1007 323 xi->tx_ring.req_prod_pvt++;
james@1007 324 xi->tx_ring_free--;
james@1007 325 txN->id = get_id_from_freelist(xi);
james@1023 326 XN_ASSERT(!xi->tx_shadows[txN->id].cb);
james@1007 327 offset = MmGetMdlByteOffset(pi.curr_mdl) + pi.curr_mdl_offset;
james@1007 328 pfn = MmGetMdlPfnArray(pi.curr_mdl)[offset >> PAGE_SHIFT];
james@1007 329 txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
james@1007 330 txN->gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
james@1023 331 XN_ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
james@1007 332 xi->tx_shadows[txN->id].gref = txN->gref;
james@1007 333 //ASSERT(sg->Elements[sg_element].Length > sg_offset);
james@1007 334 txN->size = (USHORT)length;
james@1023 335 XN_ASSERT(txN->offset + txN->size <= PAGE_SIZE);
james@1023 336 XN_ASSERT(txN->size);
james@1023 337 XN_ASSERT(txN->gref != INVALID_GRANT_REF);
james@1007 338 remaining -= length;
james@1007 339 tx_length += length;
james@1007 340 }
james@1007 341 tx0->size = tx0->size + txN->size;
james@1007 342 txN->flags = NETTXF_more_data;
james@1007 343 XenNet_EatData(&pi, length);
james@1007 344 }
james@1007 345 txN->flags &= ~NETTXF_more_data;
james@1023 346 XN_ASSERT(tx0->size == pi.total_length);
james@1023 347 XN_ASSERT(!xi->tx_shadows[txN->id].packet);
james@1007 348 xi->tx_shadows[txN->id].packet = packet;
james@1007 349
james@1007 350 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 351 if (ndis_lso) {
james@1007 352 NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
james@1007 353 }
james@1007 354 #else
james@1007 355 switch (lso_info.Transmit.Type) {
james@1007 356 case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
james@1007 357 lso_info.LsoV1TransmitComplete.TcpPayload = tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length;
james@1007 358 break;
james@1007 359 case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
james@1007 360 break;
james@1007 361 }
james@1007 362 #endif
james@1007 363
james@1007 364 xi->tx_outstanding++;
james@1007 365 return TRUE;
james@1007 366 }
james@1007 367
james@1007 368 /* Called at DISPATCH_LEVEL with tx_lock held */
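/* Drains tx_waiting_pkt_list onto the ring until XenNet_HWSendPacket fails (ring full or out of
 * grant refs), putting the failed packet back at the head, then pushes the new requests and
 * notifies the backend via the event channel if RING_PUSH_REQUESTS_AND_CHECK_NOTIFY indicates a
 * notification is needed. */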
james@1007 369 static VOID
james@1007 370 XenNet_SendQueuedPackets(struct xennet_info *xi)
james@1007 371 {
james@1007 372 PLIST_ENTRY entry;
james@1007 373 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 374 PNDIS_PACKET packet;
james@1007 375 #else
james@1007 376 PNET_BUFFER packet;
james@1007 377 #endif
james@1007 378 int notify;
james@1007 379
james@1007 380 if (xi->device_state != DEVICE_STATE_ACTIVE)
james@1007 381 return;
james@1007 382
james@1007 383 while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
james@1007 384 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
james@1007 385 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 386 packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
james@1007 387 #else
james@1007 388 packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
james@1007 389 #endif
james@1007 390 if (!XenNet_HWSendPacket(xi, packet)) {
james@1007 391 InsertHeadList(&xi->tx_waiting_pkt_list, entry);
james@1007 392 break;
james@1007 393 }
james@1007 394 }
james@1007 395
james@1007 396 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx_ring, notify);
james@1007 397 if (notify) {
james@1007 398 XnNotify(xi->handle, xi->event_channel);
james@1007 399 }
james@1007 400 }
james@1007 401
james@1007 402 // Called at DISPATCH_LEVEL
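/* Reaps completed tx responses: returns each ring slot and request id, frees any coalesce
 * buffer and grant ref recorded in the shadow entry, updates the transmit counters (NDIS6
 * build) and chains the owning packet (NDIS5) or its NET_BUFFER_LIST (NDIS6, once its
 * refcount reaches zero) for completion after tx_lock is released. Finishes by calling
 * XenNet_SendQueuedPackets to push anything that was waiting for ring space. */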
james@1007 403 VOID
james@1007 404 XenNet_TxBufferGC(struct xennet_info *xi, BOOLEAN dont_set_event) {
james@1007 405 RING_IDX cons, prod;
james@1007 406 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 407 PNDIS_PACKET head = NULL, tail = NULL;
james@1007 408 PNDIS_PACKET packet;
james@1007 409 #else
james@1007 410 PNET_BUFFER_LIST head = NULL;
james@1007 411 PNET_BUFFER_LIST tail = NULL;
james@1007 412 PNET_BUFFER_LIST nbl;
james@1007 413 PNET_BUFFER packet;
james@1007 414 #endif
james@1007 415 ULONG tx_packets = 0;
james@1007 416
james@1023 417 XN_ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
james@1007 418
james@1007 419 KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
james@1007 420
james@1007 421 if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
james@1007 422 /* there is a chance that our Dpc had been queued just before the shutdown... */
james@1007 423 KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
james@1007 424 KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
james@1007 425 return;
james@1007 426 }
james@1007 427
james@1007 428 do {
james@1007 429 prod = xi->tx_ring.sring->rsp_prod;
james@1007 430 KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */
james@1007 431
james@1007 432 for (cons = xi->tx_ring.rsp_cons; cons != prod; cons++)
james@1007 433 {
james@1007 434 struct netif_tx_response *txrsp;
james@1007 435 tx_shadow_t *shadow;
james@1007 436
james@1007 437 txrsp = RING_GET_RESPONSE(&xi->tx_ring, cons);
james@1007 438
james@1007 439 xi->tx_ring_free++;
james@1007 440
james@1007 441 if (txrsp->status == NETIF_RSP_NULL) {
james@1007 442 continue;
james@1007 443 }
james@1007 444
james@1007 445 shadow = &xi->tx_shadows[txrsp->id];
james@1007 446 if (shadow->cb) {
james@1007 447 NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
james@1007 448 shadow->cb = NULL;
james@1007 449 }
james@1007 450
james@1007 451 if (shadow->gref != INVALID_GRANT_REF) {
james@1007 452 XnEndAccess(xi->handle, shadow->gref, FALSE, (ULONG)'XNTX');
james@1007 453 shadow->gref = INVALID_GRANT_REF;
james@1007 454 }
james@1007 455
james@1007 456 if (shadow->packet) {
james@1007 457 PMDL mdl;
james@1007 458 PUCHAR header;
james@1007 459 packet = shadow->packet;
james@1007 460 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 461 mdl = NDIS_PACKET_FIRST_NDIS_BUFFER(packet);
james@1007 462 #else
james@1007 463 mdl = NET_BUFFER_CURRENT_MDL(packet);
james@1007 464 #endif
james@1007 465 #pragma warning(suppress:28193) /* already mapped so guaranteed to work */
james@1007 466 header = MmGetSystemAddressForMdlSafe(mdl, LowPagePriority);
james@1007 467 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 468 #else
james@1011 469 header += NET_BUFFER_CURRENT_MDL_OFFSET(packet);
james@1007 470 #endif
james@1007 471
james@1007 472 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 473 #else
james@1011 474 xi->stats.ifHCOutOctets += packet->DataLength;
james@1011 475 if (packet->DataLength < XN_HDR_SIZE || !(header[0] & 0x01)) {
james@1007 476 /* unicast or tiny packet */
james@1007 477 xi->stats.ifHCOutUcastPkts++;
james@1011 478 xi->stats.ifHCOutUcastOctets += packet->DataLength;
james@1007 479 }
james@1007 480 else if (header[0] == 0xFF && header[1] == 0xFF && header[2] == 0xFF
james@1007 481 && header[3] == 0xFF && header[4] == 0xFF && header[5] == 0xFF) {
james@1007 482 /* broadcast */
james@1007 483 xi->stats.ifHCOutBroadcastPkts++;
james@1011 484 xi->stats.ifHCOutBroadcastOctets += packet->DataLength;
james@1007 485 } else {
james@1007 486 /* multicast */
james@1007 487 xi->stats.ifHCOutMulticastPkts++;
james@1011 488 xi->stats.ifHCOutMulticastOctets += packet->DataLength;
james@1007 489 }
james@1007 490 #endif
james@1007 491
james@1007 492 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 493 PACKET_NEXT_PACKET(packet) = NULL;
james@1007 494 if (!head) {
james@1007 495 head = packet;
james@1007 496 } else {
james@1007 497 PACKET_NEXT_PACKET(tail) = packet;
james@1007 498 }
james@1007 499 tail = packet;
james@1007 500 #else
james@1011 501 nbl = NB_NBL(packet);
james@1007 502 NBL_REF(nbl)--;
james@1007 503 if (!NBL_REF(nbl)) {
james@1007 504 NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
james@1011 505 if (head) {
james@1011 506 NET_BUFFER_LIST_NEXT_NBL(tail) = nbl;
james@1011 507 tail = nbl;
james@1007 508 } else {
james@1011 509 head = nbl;
james@1011 510 tail = nbl;
james@1007 511 }
james@1007 512 }
james@1007 513 #endif
james@1007 514 shadow->packet = NULL;
james@1007 515 tx_packets++;
james@1007 516 }
james@1007 517 put_id_on_freelist(xi, txrsp->id);
james@1007 518 }
james@1007 519
james@1007 520 xi->tx_ring.rsp_cons = prod;
james@1007 521 /* resist the temptation to set the event more than +1... it breaks things */
james@1007 522 if (!dont_set_event)
james@1007 523 xi->tx_ring.sring->rsp_event = prod + 1;
james@1007 524 KeMemoryBarrier();
james@1007 525 } while (prod != xi->tx_ring.sring->rsp_prod);
james@1007 526
james@1007 527 /* if queued packets, send them now */
james@1007 528 XenNet_SendQueuedPackets(xi);
james@1007 529
james@1007 530 KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
james@1007 531
james@1007 532 /* must be done without holding any locks */
james@1007 533 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 534 while (head) {
james@1007 535 packet = (PNDIS_PACKET)head;
james@1007 536 head = PACKET_NEXT_PACKET(packet);
james@1007 537 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
james@1007 538 }
james@1007 539 #else
james@1011 540 if (head)
james@1011 541 NdisMSendNetBufferListsComplete(xi->adapter_handle, head, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
james@1007 542 #endif
james@1007 543
james@1007 544 /* must be done after we have truly given back all packets */
james@1007 545 KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
james@1007 546 xi->tx_outstanding -= tx_packets;
james@1007 547 if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
james@1007 548 KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
james@1007 549 }
james@1007 550 KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
james@1007 551 }
james@1007 552
james@1007 553 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 554 VOID
james@1007 555 XenNet_SendPackets(NDIS_HANDLE MiniportAdapterContext, PPNDIS_PACKET PacketArray, UINT NumberOfPackets) {
james@1007 556 struct xennet_info *xi = MiniportAdapterContext;
james@1007 557 PNDIS_PACKET packet;
james@1007 558 UINT i;
james@1007 559 PLIST_ENTRY entry;
james@1007 560 KIRQL old_irql;
james@1007 561
james@1007 562 if (xi->device_state != DEVICE_STATE_ACTIVE) {
james@1007 563 for (i = 0; i < NumberOfPackets; i++) {
james@1007 564 NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
james@1007 565 }
james@1007 566 return;
james@1007 567 }
james@1007 568
james@1007 569 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
james@1007 570
james@1007 571 for (i = 0; i < NumberOfPackets; i++) {
james@1007 572 packet = PacketArray[i];
james@1023 573 XN_ASSERT(packet);
james@1007 574 entry = &PACKET_LIST_ENTRY(packet);
james@1007 575 InsertTailList(&xi->tx_waiting_pkt_list, entry);
james@1007 576 }
james@1007 577
james@1007 578 XenNet_SendQueuedPackets(xi);
james@1007 579
james@1007 580 KeReleaseSpinLock(&xi->tx_lock, old_irql);
james@1007 581 }
james@914 582 #else
james@1007 583 // called at <= DISPATCH_LEVEL
james@1007 584 VOID
james@1007 585 XenNet_SendNetBufferLists(
james@1007 586 NDIS_HANDLE adapter_context,
james@1007 587 PNET_BUFFER_LIST nb_lists,
james@1007 588 NDIS_PORT_NUMBER port_number,
james@1007 589 ULONG send_flags) {
james@1007 590 struct xennet_info *xi = adapter_context;
james@1007 591 PLIST_ENTRY nb_entry;
james@1007 592 KIRQL old_irql;
james@1007 593 PNET_BUFFER_LIST curr_nbl;
james@1007 594 PNET_BUFFER_LIST next_nbl;
james@1007 595
james@1007 596 UNREFERENCED_PARAMETER(port_number);
james@1007 597
james@1007 598 if (xi->device_state == DEVICE_STATE_INACTIVE) {
james@1007 600 for (curr_nbl = nb_lists; curr_nbl; curr_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl)) {
james@1007 601 curr_nbl->Status = NDIS_STATUS_FAILURE;
james@1007 602 }
james@1007 603 /* this actions the whole list */
james@1007 604 NdisMSendNetBufferListsComplete(xi->adapter_handle, nb_lists, (send_flags & NDIS_SEND_FLAGS_DISPATCH_LEVEL)?NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL:0);
james@1007 605 return;
james@1007 606 }
james@1007 607
james@1007 608 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
james@1007 609
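/* Each NET_BUFFER is queued for transmission on its own; the parent NET_BUFFER_LIST's
 * reference count is set to the number of NET_BUFFERs it contains, and XenNet_TxBufferGC
 * completes the NBL only once that count has dropped back to zero. */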
james@1007 610 for (curr_nbl = nb_lists; curr_nbl; curr_nbl = next_nbl) {
james@1007 611 PNET_BUFFER curr_nb;
james@1007 612 NBL_REF(curr_nbl) = 0;
james@1007 613 next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
james@1007 614 NET_BUFFER_LIST_NEXT_NBL(curr_nbl) = NULL;
james@1007 615 for (curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl); curr_nb; curr_nb = NET_BUFFER_NEXT_NB(curr_nb)) {
james@1007 616 NB_NBL(curr_nb) = curr_nbl;
james@1007 617 nb_entry = &NB_LIST_ENTRY(curr_nb);
james@1007 618 InsertTailList(&xi->tx_waiting_pkt_list, nb_entry);
james@1007 619 NBL_REF(curr_nbl)++;
james@1007 620 }
james@1007 621 }
james@1007 622
james@1007 623 XenNet_SendQueuedPackets(xi);
james@1007 624
james@1007 625 KeReleaseSpinLock(&xi->tx_lock, old_irql);
james@1007 626 }
james@821 627 #endif
james@1007 628
james@1007 629 VOID
james@1007 630 XenNet_CancelSend(NDIS_HANDLE adapter_context, PVOID cancel_id)
james@1007 631 {
james@1007 632 UNREFERENCED_PARAMETER(adapter_context);
james@1007 633 UNREFERENCED_PARAMETER(cancel_id);
james@1007 634 FUNCTION_ENTER();
james@1007 635
james@1007 636 FUNCTION_EXIT();
james@1007 637 }
james@1007 638
james@1007 639 BOOLEAN
james@1007 640 XenNet_TxInit(xennet_info_t *xi) {
james@1007 641 USHORT i;
james@1007 643
james@1007 644 KeInitializeSpinLock(&xi->tx_lock);
james@1007 645 InitializeListHead(&xi->tx_waiting_pkt_list);
james@1007 646
james@1007 647 KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
james@1007 648 xi->tx_outstanding = 0;
james@1007 649 xi->tx_ring_free = NET_TX_RING_SIZE;
james@1007 650
james@1007 651 NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
james@1007 652 PAGE_SIZE, XENNET_POOL_TAG, 0);
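/* Buffers from this lookaside list are the PAGE_SIZE header/coalesce buffers handed out by
 * NdisAllocateFromNPagedLookasideList in XenNet_HWSendPacket. */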
james@1007 653
james@1007 654 xi->tx_id_free = 0;
james@1007 655 for (i = 0; i < NET_TX_RING_SIZE; i++) {
james@1007 656 xi->tx_shadows[i].gref = INVALID_GRANT_REF;
james@1007 657 xi->tx_shadows[i].cb = NULL;
james@1007 658 put_id_on_freelist(xi, i);
james@1007 659 }
james@1007 660
james@1007 661 return TRUE;
james@1007 662 }
james@1007 663
james@1007 664 /*
james@1007 665 The ring is completely closed down now. We just need to empty anything left
james@1007 666 on our freelists and harvest anything left on the rings.
james@1007 667 */
james@1007 668
james@1007 669 BOOLEAN
james@1007 670 XenNet_TxShutdown(xennet_info_t *xi) {
james@1007 671 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 672 PNDIS_PACKET packet;
james@1007 673 #else
james@1007 674 PNET_BUFFER packet;
james@1007 675 PNET_BUFFER_LIST nbl;
james@1007 676 #endif
james@1007 677 PLIST_ENTRY entry;
james@1007 678 LARGE_INTEGER timeout;
james@1007 679 KIRQL old_irql;
james@1007 680
james@1007 681 FUNCTION_ENTER();
james@1007 682
james@1007 683 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
james@1007 684
james@1007 685 while (xi->tx_outstanding) {
james@1007 686 KeReleaseSpinLock(&xi->tx_lock, old_irql);
james@1007 687 KdPrint((__DRIVER_NAME " Waiting for %d remaining packets to be sent\n", xi->tx_outstanding));
james@1007 688 timeout.QuadPart = -1 * 1 * 1000 * 1000 * 10; /* relative timeout in 100 ns units: 10,000,000 * 100 ns = 1 second */
james@1007 689 KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, &timeout);
james@1007 690 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
james@1007 691 }
james@1007 692 KeReleaseSpinLock(&xi->tx_lock, old_irql);
james@1007 693
james@1007 694 /* Free packets in tx queue */
james@1007 695 while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
james@1007 696 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
james@1007 697 #if NTDDI_VERSION < NTDDI_VISTA
james@1007 698 packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
james@1007 699 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
james@1007 701 #else
james@1011 702 packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
james@1011 703 nbl = NB_NBL(packet);
james@1007 704 NBL_REF(nbl)--;
james@1007 705 if (!NBL_REF(nbl)) {
james@1007 706 nbl->Status = NDIS_STATUS_FAILURE;
james@1007 707 NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
james@1007 708 }
james@1007 709 #endif
james@1007 710 }
james@1007 711 NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);
james@1007 712
james@1007 713 FUNCTION_EXIT();
james@1007 714
james@1007 715 return TRUE;
james@1007 716 }