win-pvdrivers

xennet/xennet_tx.c @ 1070:05ece536b204

Fix LSO bug on FIN packets. Add RxCoalesce option (default on) to work around Cisco VPN issues.

author   James Harper <james.harper@bendigoit.com.au>
date     Wed Nov 13 07:56:13 2013 +1100
parents  00d29add6a2a
children 83201dc2ea3f
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"
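
/*
 * TX path overview: XenNet_SendPackets / XenNet_SendNetBufferLists queue
 * packets, XenNet_SendQueuedPackets maps (or coalesces) them onto the shared
 * netif TX ring, and XenNet_TxBufferGC harvests responses, releases grants
 * and completes the packets back to NDIS.
 */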
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  XN_ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}

#define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))
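/* e.g. SWAP_USHORT(0x1234) == 0x3412 - swaps a USHORT to/from network byte order */

/*
 * Claim the next slot on the TX ring for a coalesce buffer: take a free id,
 * grant the buffer's page to the backend, and record both in the shadow
 * table so they can be reclaimed when the response comes back.
 */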
static __forceinline struct netif_tx_request *
XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
{
  struct netif_tx_request *tx;
  tx = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
  xi->tx_ring.req_prod_pvt++;
  xi->tx_ring_free--;
  tx->id = get_id_from_freelist(xi);
  XN_ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
  XN_ASSERT(!xi->tx_shadows[tx->id].cb);
  xi->tx_shadows[tx->id].cb = coalesce_buf;
  tx->gref = XnGrantAccess(xi->handle, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
  xi->tx_shadows[tx->id].gref = tx->gref;
  tx->offset = 0;
  tx->size = (USHORT)length;
  XN_ASSERT(tx->offset + tx->size <= PAGE_SIZE);
  XN_ASSERT(tx->size);
  return tx;
}
#if 0
static VOID dump_packet_data(PNDIS_PACKET packet, PCHAR header) {
  UINT mdl_count;
  PMDL first_mdl;
  UINT total_length;

  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &first_mdl, (PUINT)&total_length);
  FUNCTION_MSG("%s mdl_count = %d, first_mdl = %p, total_length = %d\n", header, mdl_count, first_mdl, total_length);
}
#endif
/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET (pre-Vista) or NET_BUFFER. This may involve multiple
 * entries on the TX ring.
 */
#if NTDDI_VERSION < NTDDI_VISTA
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet) {
#else
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNET_BUFFER packet) {
#endif
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  ULONG mss = 0;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  UINT mdl_count;
#else
  NDIS_TCP_LARGE_SEND_OFFLOAD_NET_BUFFER_LIST_INFO lso_info;
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  ULONG remaining;
  ULONG frags = 0;
  BOOLEAN coalesce_required = FALSE;
  PVOID coalesce_buf;
  ULONG coalesce_remaining = 0;
  grant_ref_t gref;
  ULONG tx_length = 0;

  gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
  if (gref == INVALID_GRANT_REF)
  {
    FUNCTION_MSG("out of grefs\n");
    return FALSE;
  }
  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
  if (!coalesce_buf)
  {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    FUNCTION_MSG("out of memory\n");
    return FALSE;
  }
  XenNet_ClearPacketInfo(&pi);
#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &pi.first_mdl, (PUINT)&pi.total_length);
  pi.curr_mdl = pi.first_mdl;
#else
  /* create a new MDL over the data portion of the first MDL in the packet... it's just easier this way */
  IoBuildPartialMdl(packet->CurrentMdl,
    &pi.first_mdl_storage,
    (PUCHAR)MmGetMdlVirtualAddress(packet->CurrentMdl) + packet->CurrentMdlOffset,
    MmGetMdlByteCount(packet->CurrentMdl) - packet->CurrentMdlOffset);
  pi.total_length = packet->DataLength;
  pi.first_mdl_storage.Next = packet->CurrentMdl->Next;
  pi.first_mdl = pi.curr_mdl = &pi.first_mdl_storage;
#endif
  pi.first_mdl_offset = pi.curr_mdl_offset = 0;
  remaining = min(pi.total_length, PAGE_SIZE);
  while (remaining) { /* this much gets put in the header */
    ULONG length = XenNet_QueryData(&pi, remaining);
    remaining -= length;
    XenNet_EatData(&pi, length);
  }
  frags++;
  if (pi.total_length > PAGE_SIZE) { /* these are the frags we care about */
    remaining = pi.total_length - PAGE_SIZE;
    while (remaining) {
      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
      if (length != 0) {
        frags++;
        if (frags > LINUX_MAX_SG_ELEMENTS)
          break; /* worst case there could be hundreds of fragments - leave the loop now */
      }
      remaining -= length;
      XenNet_EatData(&pi, length);
    }
  }
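
  /*
   * The Linux backend accepts only a limited number of slots per packet
   * (LINUX_MAX_SG_ELEMENTS), so a packet with more fragments than that is
   * copied into page-sized coalesce buffers below instead of being granted
   * page by page.
   */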
  if (frags > LINUX_MAX_SG_ELEMENTS) {
    frags = LINUX_MAX_SG_ELEMENTS;
    coalesce_required = TRUE;
  }

  /* if we have enough space on the ring then we have enough id's so no need to check for that */
  if (xi->tx_ring_free < frags + 1) {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
    //FUNCTION_MSG("Full on send - ring full\n");
    return FALSE;
  }
  XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
  remaining = pi.total_length - pi.header_length;
  /* an LSO send can arrive with the IPv4 total length field left at zero - fill it in
     (0x10 = 14 byte ethernet header + offset 2 of the IP header) */
  if (pi.ip_version == 4 && pi.ip_proto == 6 && pi.ip4_length == 0) {
    *((PUSHORT)(pi.header + 0x10)) = GET_NET_USHORT((USHORT)pi.total_length - XN_HDR_SIZE);
  }
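
  /*
   * NETTXF_csum_blank tells the backend that the checksum field has not been
   * filled in yet; NETTXF_data_validated lets the receiving side skip
   * verification.
   */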
#if NTDDI_VERSION < NTDDI_VISTA
  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP) {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4) {
      if (csum_info->Transmit.NdisPacketTcpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      } else if (csum_info->Transmit.NdisPacketUdpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      }
    }
  }
#else
  csum_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpIpChecksumNetBufferListInfo);
  if (csum_info.Transmit.IsIPv4) {
    if (csum_info.Transmit.TcpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    } else if (csum_info.Transmit.UdpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    }
  } else if (csum_info.Transmit.IsIPv6) {
    FUNCTION_MSG("Transmit.IsIPv6 not supported\n");
  }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
#else
  lso_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpLargeSendNetBufferListInfo);
  switch (lso_info.Transmit.Type) {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    mss = lso_info.LsoV1Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    mss = lso_info.LsoV2Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  }
#endif
  if (mss && pi.parse_result == PARSE_OK) {
    ndis_lso = TRUE;
  }

  if (ndis_lso) {
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not explicitly set when LSO is used */
    if (pi.tcp_length >= mss) {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
  }
  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */
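
  /*
   * For example, a GSO send with two extra data fragments occupies four slots:
   *
   *   slot 0: tx0 - copied header, csum/extra_info flags, more_data
   *   slot 1: ei  - GSO extra info (mss)
   *   slot 2: txN - data fragment, more_data
   *   slot 3: txN - last data fragment (more_data cleared after the loop)
   *
   * tx0->size is accumulated up to the total packet length below, which is
   * what the backend expects in the first request.
   */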
  /* (A) */
  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
  XN_ASSERT(tx0); /* RING_GET_REQUEST cannot fail, so this will never fire */
  tx0->flags = flags;
  tx_length += pi.header_length;

  /* lso implies IpHeaderChecksum */
#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#else
  if (ndis_lso || csum_info.Transmit.IpHeaderChecksum) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#endif
  txN = tx0;

  /* (B) */
  if (xen_gso) {
    XN_ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
    //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx_ring.req_prod_pvt));
    xi->tx_ring.req_prod_pvt++;
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
  }

  XN_ASSERT(xi->current_sg_supported || !remaining);

  /* (C) - only if data is remaining */
  coalesce_buf = NULL;
  while (remaining > 0) {
    ULONG length;
    PFN_NUMBER pfn;

    XN_ASSERT(pi.curr_mdl);
    if (coalesce_required) {
      PVOID va;
      if (!coalesce_buf) {
        gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
        if (gref == INVALID_GRANT_REF) {
          FUNCTION_MSG("out of grefs - partial send\n");
          break;
        }
        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
        if (!coalesce_buf) {
          XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
          FUNCTION_MSG("out of memory - partial send\n");
          break;
        }
        coalesce_remaining = min(PAGE_SIZE, remaining);
      }
      length = XenNet_QueryData(&pi, coalesce_remaining);
      va = NdisBufferVirtualAddressSafe(pi.curr_mdl, LowPagePriority);
      if (!va) {
        FUNCTION_MSG("failed to map buffer va - partial send\n");
        coalesce_remaining = 0;
        remaining -= min(PAGE_SIZE, remaining);
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
      } else {
        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
        coalesce_remaining -= length;
      }
    } else {
      length = XenNet_QueryData(&pi, PAGE_SIZE);
    }
    if (!length || coalesce_remaining) { /* sometimes there are zero length buffers... */
      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
      continue;
    }

    if (coalesce_buf) {
      if (remaining) {
        txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
        XN_ASSERT(txN);
        coalesce_buf = NULL;
        tx_length += min(PAGE_SIZE, remaining);
        remaining -= min(PAGE_SIZE, remaining);
      }
    } else {
      ULONG offset;

      gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
      if (gref == INVALID_GRANT_REF) {
        FUNCTION_MSG("out of grefs - partial send\n");
        break;
      }
      txN = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
      xi->tx_ring.req_prod_pvt++;
      xi->tx_ring_free--;
      txN->id = get_id_from_freelist(xi);
      XN_ASSERT(!xi->tx_shadows[txN->id].cb);
      offset = MmGetMdlByteOffset(pi.curr_mdl) + pi.curr_mdl_offset;
      pfn = MmGetMdlPfnArray(pi.curr_mdl)[offset >> PAGE_SHIFT];
      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
      txN->gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
      XN_ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
      xi->tx_shadows[txN->id].gref = txN->gref;
      txN->size = (USHORT)length;
      XN_ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      XN_ASSERT(txN->size);
      XN_ASSERT(txN->gref != INVALID_GRANT_REF);
      remaining -= length;
      tx_length += length;
    }
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    XenNet_EatData(&pi, length);
  }
  txN->flags &= ~NETTXF_more_data;
  XN_ASSERT(tx0->size == pi.total_length);
  XN_ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;
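
  /*
   * For LsoV1 sends, NDIS expects the number of TCP payload bytes actually
   * transmitted to be reported back; LsoV2 has no equivalent completion field.
   */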
#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
  }
#else
  switch (lso_info.Transmit.Type) {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    lso_info.LsoV1TransmitComplete.TcpPayload = tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length;
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    break;
  }
#endif

  xi->tx_outstanding++;
  return TRUE;
}
/* Called at DISPATCH_LEVEL with tx_lock held */
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
#endif
  int notify;

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
#endif
    if (!XenNet_HWSendPacket(xi, packet)) {
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }
}
// Called at DISPATCH_LEVEL
VOID
XenNet_TxBufferGC(struct xennet_info *xi, BOOLEAN dont_set_event) {
  RING_IDX cons, prod;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET head = NULL, tail = NULL;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST head = NULL;
  PNET_BUFFER_LIST tail = NULL;
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  ULONG tx_packets = 0;

  XN_ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }

  do {
    prod = xi->tx_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx_ring.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      tx_shadow_t *shadow;

      txrsp = RING_GET_RESPONSE(&xi->tx_ring, cons);

      xi->tx_ring_free++;

      if (txrsp->status == NETIF_RSP_NULL) {
        continue;
      }

      shadow = &xi->tx_shadows[txrsp->id];
      if (shadow->cb) {
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
        shadow->cb = NULL;
      }

      if (shadow->gref != INVALID_GRANT_REF) {
        XnEndAccess(xi->handle, shadow->gref, FALSE, (ULONG)'XNTX');
        shadow->gref = INVALID_GRANT_REF;
      }

      if (shadow->packet) {
        PMDL mdl;
        PUCHAR header;
        packet = shadow->packet;
#if NTDDI_VERSION < NTDDI_VISTA
        mdl = NDIS_PACKET_FIRST_NDIS_BUFFER(packet);
#else
        mdl = NET_BUFFER_CURRENT_MDL(packet);
#endif
#pragma warning(suppress:28193) /* already mapped so guaranteed to work */
        header = MmGetSystemAddressForMdlSafe(mdl, LowPagePriority);
#if NTDDI_VERSION >= NTDDI_VISTA
        header += NET_BUFFER_CURRENT_MDL_OFFSET(packet);

        xi->stats.ifHCOutOctets += packet->DataLength;
        if (packet->DataLength < XN_HDR_SIZE || !(header[0] & 0x01)) {
          /* unicast or tiny packet */
          xi->stats.ifHCOutUcastPkts++;
          xi->stats.ifHCOutUcastOctets += packet->DataLength;
        }
        else if (header[0] == 0xFF && header[1] == 0xFF && header[2] == 0xFF
            && header[3] == 0xFF && header[4] == 0xFF && header[5] == 0xFF) {
          /* broadcast */
          xi->stats.ifHCOutBroadcastPkts++;
          xi->stats.ifHCOutBroadcastOctets += packet->DataLength;
        } else {
          /* multicast */
          xi->stats.ifHCOutMulticastPkts++;
          xi->stats.ifHCOutMulticastOctets += packet->DataLength;
        }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
        PACKET_NEXT_PACKET(packet) = NULL;
        if (!head) {
          head = packet;
        } else {
          PACKET_NEXT_PACKET(tail) = packet;
        }
        tail = packet;
#else
        nbl = NB_NBL(packet);
        NBL_REF(nbl)--;
        if (!NBL_REF(nbl)) {
          NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
          if (head) {
            NET_BUFFER_LIST_NEXT_NBL(tail) = nbl;
            tail = nbl;
          } else {
            head = nbl;
            tail = nbl;
          }
        }
#endif
        shadow->packet = NULL;
        tx_packets++;
      }
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx_ring.rsp_cons = prod;
    /* resist the temptation to set the event more than +1... it breaks things */
    if (!dont_set_event)
      xi->tx_ring.sring->rsp_event = prod + 1;
    KeMemoryBarrier();
  } while (prod != xi->tx_ring.sring->rsp_prod);

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* must be done without holding any locks */
#if NTDDI_VERSION < NTDDI_VISTA
  while (head) {
    packet = (PNDIS_PACKET)head;
    head = PACKET_NEXT_PACKET(packet);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
  }
#else
  if (head)
    NdisMSendNetBufferListsComplete(xi->adapter_handle, head, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
#endif

  /* must be done after we have truly given back all packets */
  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
  xi->tx_outstanding -= tx_packets;
  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  }
  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
}
#if NTDDI_VERSION < NTDDI_VISTA
VOID
XenNet_SendPackets(NDIS_HANDLE MiniportAdapterContext, PPNDIS_PACKET PacketArray, UINT NumberOfPackets) {
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL old_irql;

  if (xi->device_state != DEVICE_STATE_ACTIVE) {
    for (i = 0; i < NumberOfPackets; i++) {
      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
    }
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  for (i = 0; i < NumberOfPackets; i++) {
    packet = PacketArray[i];
    XN_ASSERT(packet);
    entry = &PACKET_LIST_ENTRY(packet);
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#else
// called at <= DISPATCH_LEVEL
VOID
XenNet_SendNetBufferLists(
  NDIS_HANDLE adapter_context,
  PNET_BUFFER_LIST nb_lists,
  NDIS_PORT_NUMBER port_number,
  ULONG send_flags) {
  struct xennet_info *xi = adapter_context;
  PLIST_ENTRY nb_entry;
  KIRQL old_irql;
  PNET_BUFFER_LIST curr_nbl;
  PNET_BUFFER_LIST next_nbl;

  UNREFERENCED_PARAMETER(port_number);

  if (xi->device_state == DEVICE_STATE_INACTIVE) {
    for (curr_nbl = nb_lists; curr_nbl; curr_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl)) {
      curr_nbl->Status = NDIS_STATUS_FAILURE;
    }
    /* this actions the whole list */
    NdisMSendNetBufferListsComplete(xi->adapter_handle, nb_lists, (send_flags & NDIS_SEND_FLAGS_DISPATCH_LEVEL) ? NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL : 0);
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
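
  /*
   * Each NET_BUFFER queued below takes a reference on its parent
   * NET_BUFFER_LIST; the NBL is completed back to NDIS (in XenNet_TxBufferGC
   * or XenNet_TxShutdown) only when its last NET_BUFFER is done.
   */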
  for (curr_nbl = nb_lists; curr_nbl; curr_nbl = next_nbl) {
    PNET_BUFFER curr_nb;
    NBL_REF(curr_nbl) = 0;
    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    NET_BUFFER_LIST_NEXT_NBL(curr_nbl) = NULL;
    for (curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl); curr_nb; curr_nb = NET_BUFFER_NEXT_NB(curr_nb)) {
      NB_NBL(curr_nb) = curr_nbl;
      nb_entry = &NB_LIST_ENTRY(curr_nb);
      InsertTailList(&xi->tx_waiting_pkt_list, nb_entry);
      NBL_REF(curr_nbl)++;
    }
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#endif
VOID
XenNet_CancelSend(NDIS_HANDLE adapter_context, PVOID cancel_id)
{
  UNREFERENCED_PARAMETER(adapter_context);
  UNREFERENCED_PARAMETER(cancel_id);
  FUNCTION_ENTER();

  FUNCTION_EXIT();
}
BOOLEAN
XenNet_TxInit(xennet_info_t *xi) {
  USHORT i;

  KeInitializeSpinLock(&xi->tx_lock);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;

  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
    PAGE_SIZE, XENNET_POOL_TAG, 0);

  xi->tx_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++) {
    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
    xi->tx_shadows[i].cb = NULL;
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}
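
/*
 * Note: one shadow entry and one free id exist per ring slot, so as long as
 * tx_ring_free is checked before requests are claimed, get_id_from_freelist()
 * can never underflow.
 */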
/*
  The ring is completely closed down now. We just need to empty anything left
  on our freelists and harvest anything left on the rings.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi) {
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
  PNET_BUFFER_LIST nbl;
#endif
  PLIST_ENTRY entry;
  LARGE_INTEGER timeout;
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  while (xi->tx_outstanding) {
    KeReleaseSpinLock(&xi->tx_lock, old_irql);
    FUNCTION_MSG("Waiting for %d remaining packets to be sent\n", xi->tx_outstanding);
    timeout.QuadPart = -1 * 1 * 1000 * 1000 * 10; /* relative 1 second, in 100ns units */
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, &timeout);
    KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  }
  KeReleaseSpinLock(&xi->tx_lock, old_irql);
  /* Free packets in tx queue */
  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
    nbl = NB_NBL(packet);
    NBL_REF(nbl)--;
    if (!NBL_REF(nbl)) {
      nbl->Status = NDIS_STATUS_FAILURE;
      NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
    }
#endif
  }
  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);

  FUNCTION_EXIT();

  return TRUE;
}