win-pvdrivers

view xennet/xennet_tx.c @ 1071:83201dc2ea3f

Fix a bug in the LSO checksum handling: the tcp_length needs to be removed from the pseudoheader checksum.

author       James Harper <james.harper@bendigoit.com.au>
date         Fri Nov 15 11:32:15 2013 +1100 (2013-11-15)
parents      00d29add6a2a
children     a60d401aa020
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"
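
/*
 * TX ids index the tx_shadows[] table and travel with each request/response
 * pair on the ring.  Free ids are kept on a simple LIFO stack: tx_id_free is
 * both the stack depth and the index of the next free slot.
 */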
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  XN_ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}
#define SWAP_USHORT(x) ((USHORT)(((((x) & 0xFF) << 8) | (((x) >> 8) & 0xFF))))
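
/*
 * Claim the next slot on the TX ring for a coalesce buffer: take a free id,
 * record the buffer in the shadow entry so it can be returned to the
 * lookaside list on completion, and grant the backend access to its page.
 * The caller supplies a pre-allocated gref, so this path cannot fail.
 */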
static __forceinline struct netif_tx_request *
XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
{
  struct netif_tx_request *tx;
  tx = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
  xi->tx_ring.req_prod_pvt++;
  xi->tx_ring_free--;
  tx->id = get_id_from_freelist(xi);
  XN_ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
  XN_ASSERT(!xi->tx_shadows[tx->id].cb);
  xi->tx_shadows[tx->id].cb = coalesce_buf;
  tx->gref = XnGrantAccess(xi->handle, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
  xi->tx_shadows[tx->id].gref = tx->gref;
  tx->offset = 0;
  tx->size = (USHORT)length;
  XN_ASSERT(tx->offset + tx->size <= PAGE_SIZE);
  XN_ASSERT(tx->size);
  return tx;
}
#if 0
static VOID dump_packet_data(PNDIS_PACKET packet, PCHAR header) {
  UINT mdl_count;
  PMDL first_mdl;
  UINT total_length;

  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &first_mdl, (PUINT)&total_length);
  FUNCTION_MSG("%s mdl_count = %d, first_mdl = %p, total_length = %d\n", header, mdl_count, first_mdl, total_length);
}
#endif
/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
 */
#if NTDDI_VERSION < NTDDI_VISTA
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet) {
#else
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNET_BUFFER packet) {
#endif
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  ULONG mss = 0;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  UINT mdl_count;
#else
  NDIS_TCP_LARGE_SEND_OFFLOAD_NET_BUFFER_LIST_INFO lso_info;
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  ULONG remaining;
  ULONG frags = 0;
  BOOLEAN coalesce_required = FALSE;
  PVOID coalesce_buf;
  ULONG coalesce_remaining = 0;
  grant_ref_t gref;
  ULONG tx_length = 0;

  gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
  if (gref == INVALID_GRANT_REF)
  {
    FUNCTION_MSG("out of grefs\n");
    return FALSE;
  }
  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
  if (!coalesce_buf)
  {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    FUNCTION_MSG("out of memory\n");
    return FALSE;
  }
  XenNet_ClearPacketInfo(&pi);
#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &pi.first_mdl, (PUINT)&pi.total_length);
  pi.curr_mdl = pi.first_mdl;
#else
  /* create a new MDL over the data portion of the first MDL in the packet... it's just easier this way */
  IoBuildPartialMdl(packet->CurrentMdl,
    &pi.first_mdl_storage,
    (PUCHAR)MmGetMdlVirtualAddress(packet->CurrentMdl) + packet->CurrentMdlOffset,
    MmGetMdlByteCount(packet->CurrentMdl) - packet->CurrentMdlOffset);
  pi.total_length = packet->DataLength;
  pi.first_mdl_storage.Next = packet->CurrentMdl->Next;
  pi.first_mdl = pi.curr_mdl = &pi.first_mdl_storage;
#endif
  pi.first_mdl_offset = pi.curr_mdl_offset = 0;
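  /*
   * Walk the first page's worth of data so the MDL cursor sits past the
   * bytes that will be sent from the coalesce buffer; the loop further down
   * then counts only the remaining data as fragments.
   */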
  remaining = min(pi.total_length, PAGE_SIZE);
  while (remaining) { /* this much gets put in the header */
    ULONG length = XenNet_QueryData(&pi, remaining);
    remaining -= length;
    XenNet_EatData(&pi, length);
  }
  frags++;
  if (pi.total_length > PAGE_SIZE) { /* these are the frags we care about */
    remaining = pi.total_length - PAGE_SIZE;
    while (remaining) {
      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
      if (length != 0) {
        frags++;
        if (frags > LINUX_MAX_SG_ELEMENTS)
          break; /* worst case there could be hundreds of fragments - leave the loop now */
      }
      remaining -= length;
      XenNet_EatData(&pi, length);
    }
  }
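  /*
   * If the packet would need more requests than the backend accepts, cap the
   * count and copy the payload into PAGE_SIZE coalesce buffers below instead
   * of granting it page by page.
   */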
  if (frags > LINUX_MAX_SG_ELEMENTS) {
    frags = LINUX_MAX_SG_ELEMENTS;
    coalesce_required = TRUE;
  }

  /* if we have enough space on the ring then we have enough id's so no need to check for that */
  if (xi->tx_ring_free < frags + 1) {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
    //FUNCTION_MSG("Full on send - ring full\n");
    return FALSE;
  }
  XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
  remaining = pi.total_length - pi.header_length;
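  /*
   * ip_proto 6 is TCP.  A packet can arrive with the IPv4 Total Length field
   * still zero (pi.ip4_length == 0); offset 0x10 is that field (XN_HDR_SIZE
   * == 14 bytes of Ethernet header plus 2 bytes into the IP header), so fill
   * it in with the real length, in network byte order.
   */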
  if (pi.ip_version == 4 && pi.ip_proto == 6 && pi.ip4_length == 0) {
    *((PUSHORT)(pi.header + 0x10)) = GET_NET_USHORT((USHORT)pi.total_length - XN_HDR_SIZE);
  }

#if NTDDI_VERSION < NTDDI_VISTA
  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP) {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4) {
      if (csum_info->Transmit.NdisPacketTcpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      } else if (csum_info->Transmit.NdisPacketUdpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      }
    }
  }
#else
  csum_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpIpChecksumNetBufferListInfo);
  if (csum_info.Transmit.IsIPv4) {
    if (csum_info.Transmit.TcpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    } else if (csum_info.Transmit.UdpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    }
  } else if (csum_info.Transmit.IsIPv6) {
    FUNCTION_MSG("Transmit.IsIPv6 not supported\n");
  }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
#else
  lso_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpLargeSendNetBufferListInfo);
  switch (lso_info.Transmit.Type) {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    mss = lso_info.LsoV1Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    mss = lso_info.LsoV2Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  }
#endif
  if (mss && pi.parse_result == PARSE_OK) {
    ndis_lso = TRUE;
  }

  if (ndis_lso) {
    ULONG csum;
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss) {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
    /* Adjust pseudoheader checksum to be what Linux expects (remove the tcp_length) */
    csum = ~RtlUshortByteSwap(*(PUSHORT)&pi.header[XN_HDR_SIZE + pi.ip4_header_length + 16]);
    csum -= (pi.ip4_length - pi.ip4_header_length);
    while (csum & 0xFFFF0000)
      csum = (csum & 0xFFFF) + (csum >> 16);
    *(PUSHORT)&pi.header[XN_HDR_SIZE + pi.ip4_header_length + 16] = ~RtlUshortByteSwap((USHORT)csum);
  }
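  /*
   * The field at XN_HDR_SIZE + ip4_header_length + 16 is the TCP checksum,
   * which Windows pre-loads with the pseudoheader checksum.  Linux expects
   * the pseudoheader checksum without the TCP length (it fills in the
   * per-segment length itself), so subtract tcp_length = ip4_length -
   * ip4_header_length in ones-complement.  A worked example with
   * illustrative values: if the field reads 0xB861 and tcp_length is 0x0034,
   *   csum  = ~0xB861 = 0xFFFF479E
   *   csum -= 0x0034  = 0xFFFF476A
   *   fold: 0x476A + 0xFFFF = 0x00014769 -> 0x4769 + 0x0001 = 0x476A
   *   store ~0x476A = 0xB895 back into the packet.
   */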
  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */

  /* (A) */
  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
  XN_ASSERT(tx0); /* RING_GET_REQUEST never returns NULL, so this cannot fail */
  tx0->flags = flags;
  tx_length += pi.header_length;

  /* lso implies IpHeaderChecksum */
#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#else
  if (ndis_lso || csum_info.Transmit.IpHeaderChecksum) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#endif
  txN = tx0;

  /* (B) */
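  /*
   * The GSO extra_info occupies a slot on the ring (req_prod_pvt and
   * tx_ring_free are adjusted) but it is not a netif_tx_request: it carries
   * no id or grant, so there is no shadow entry to clean up.  The backend's
   * response for this slot has status NETIF_RSP_NULL, which
   * XenNet_TxBufferGC skips.
   */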
  if (xen_gso) {
    XN_ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
    //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx_ring.req_prod_pvt));
    xi->tx_ring.req_prod_pvt++;
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
  }

  XN_ASSERT(xi->current_sg_supported || !remaining);

  /* (C) - only if data is remaining */
  coalesce_buf = NULL;
  while (remaining > 0) {
    ULONG length;
    PFN_NUMBER pfn;

    XN_ASSERT(pi.curr_mdl);
    if (coalesce_required) {
      PVOID va;
      if (!coalesce_buf) {
        gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
        if (gref == INVALID_GRANT_REF) {
          FUNCTION_MSG("out of grefs - partial send\n");
          break;
        }
        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
        if (!coalesce_buf) {
          XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
          FUNCTION_MSG("out of memory - partial send\n");
          break;
        }
        coalesce_remaining = min(PAGE_SIZE, remaining);
      }
      length = XenNet_QueryData(&pi, coalesce_remaining);
      va = NdisBufferVirtualAddressSafe(pi.curr_mdl, LowPagePriority);
      if (!va) {
        /* give the buffer and grant back before bailing so neither is used after free */
        FUNCTION_MSG("failed to map buffer va - partial send\n");
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
        coalesce_buf = NULL;
        XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
        break;
      } else {
        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
        coalesce_remaining -= length;
      }
    } else {
      length = XenNet_QueryData(&pi, PAGE_SIZE);
    }
    if (!length || coalesce_remaining) { /* sometimes there are zero length buffers... */
      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
      continue;
    }

    if (coalesce_buf) {
      if (remaining) {
        txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
        XN_ASSERT(txN);
        coalesce_buf = NULL;
        tx_length += min(PAGE_SIZE, remaining);
        remaining -= min(PAGE_SIZE, remaining);
      }
    } else {
      ULONG offset;

      gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
      if (gref == INVALID_GRANT_REF) {
        FUNCTION_MSG("out of grefs - partial send\n");
        break;
      }
      txN = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
      xi->tx_ring.req_prod_pvt++;
      xi->tx_ring_free--;
      txN->id = get_id_from_freelist(xi);
      XN_ASSERT(!xi->tx_shadows[txN->id].cb);
      offset = MmGetMdlByteOffset(pi.curr_mdl) + pi.curr_mdl_offset;
      pfn = MmGetMdlPfnArray(pi.curr_mdl)[offset >> PAGE_SHIFT];
      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
      txN->gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
      XN_ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
      xi->tx_shadows[txN->id].gref = txN->gref;
      //ASSERT(sg->Elements[sg_element].Length > sg_offset);
      txN->size = (USHORT)length;
      XN_ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      XN_ASSERT(txN->size);
      XN_ASSERT(txN->gref != INVALID_GRANT_REF);
      remaining -= length;
      tx_length += length;
    }
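    /*
     * In the netif protocol the size field of the first request holds the
     * total packet size, so accumulate every fragment's size into tx0; the
     * assert after the loop checks the sum against pi.total_length.
     */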
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    XenNet_EatData(&pi, length);
  }
  txN->flags &= ~NETTXF_more_data;
  XN_ASSERT(tx0->size == pi.total_length);

  XN_ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;
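
  /*
   * For LSO v1, NDIS expects the miniport to report how many TCP payload
   * bytes were actually sent: tx_length less the Ethernet, IP and TCP
   * header lengths.
   */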
#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
  }
#else
  switch (lso_info.Transmit.Type) {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    lso_info.LsoV1TransmitComplete.TcpPayload = tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length;
    /* write the completion info back to the NBL; lso_info is only a local copy */
    NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpLargeSendNetBufferListInfo) = lso_info.Value;
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    break;
  }
#endif

  xi->tx_outstanding++;
  return TRUE;
}

/* Called at DISPATCH_LEVEL with tx_lock held */
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
#endif
  int notify;

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
#endif
    if (!XenNet_HWSendPacket(xi, packet)) {
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }
}
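
/*
 * Reap TX responses: return each shadow's coalesce buffer to the lookaside
 * list, end grant access, and collect completed packets (pre-Vista) or
 * NET_BUFFER_LISTs whose last NET_BUFFER has completed (NDIS6) so they can
 * be completed to NDIS after the lock is dropped.
 */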
// Called at DISPATCH_LEVEL
VOID
XenNet_TxBufferGC(struct xennet_info *xi, BOOLEAN dont_set_event) {
  RING_IDX cons, prod;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET head = NULL, tail = NULL;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST head = NULL;
  PNET_BUFFER_LIST tail = NULL;
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  ULONG tx_packets = 0;

  XN_ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }

  do {
    prod = xi->tx_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx_ring.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      tx_shadow_t *shadow;

      txrsp = RING_GET_RESPONSE(&xi->tx_ring, cons);

      xi->tx_ring_free++;

      if (txrsp->status == NETIF_RSP_NULL) {
        continue;
      }

      shadow = &xi->tx_shadows[txrsp->id];
      if (shadow->cb) {
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
        shadow->cb = NULL;
      }

      if (shadow->gref != INVALID_GRANT_REF) {
        XnEndAccess(xi->handle, shadow->gref, FALSE, (ULONG)'XNTX');
        shadow->gref = INVALID_GRANT_REF;
      }

      if (shadow->packet) {
        PMDL mdl;
        PUCHAR header;
        packet = shadow->packet;
#if NTDDI_VERSION < NTDDI_VISTA
        mdl = NDIS_PACKET_FIRST_NDIS_BUFFER(packet);
#else
        mdl = NET_BUFFER_CURRENT_MDL(packet);
#endif
#pragma warning(suppress:28193) /* already mapped so guaranteed to work */
        header = MmGetSystemAddressForMdlSafe(mdl, LowPagePriority);
#if NTDDI_VERSION < NTDDI_VISTA
#else
        header += NET_BUFFER_CURRENT_MDL_OFFSET(packet);
#endif
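
        /*
         * Classify the completed frame for the ifHC* statistics: the low
         * bit of the first destination MAC octet (the I/G bit) marks group
         * addresses, and all-FF is broadcast.
         */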
#if NTDDI_VERSION < NTDDI_VISTA
#else
        xi->stats.ifHCOutOctets += packet->DataLength;
        if (packet->DataLength < XN_HDR_SIZE || !(header[0] & 0x01)) {
          /* unicast or tiny packet */
          xi->stats.ifHCOutUcastPkts++;
          xi->stats.ifHCOutUcastOctets += packet->DataLength;
        } else if (header[0] == 0xFF && header[1] == 0xFF && header[2] == 0xFF
            && header[3] == 0xFF && header[4] == 0xFF && header[5] == 0xFF) {
          /* broadcast */
          xi->stats.ifHCOutBroadcastPkts++;
          xi->stats.ifHCOutBroadcastOctets += packet->DataLength;
        } else {
          /* multicast */
          xi->stats.ifHCOutMulticastPkts++;
          xi->stats.ifHCOutMulticastOctets += packet->DataLength;
        }
#endif

#if NTDDI_VERSION < NTDDI_VISTA
        PACKET_NEXT_PACKET(packet) = NULL;
        if (!head) {
          head = packet;
        } else {
          PACKET_NEXT_PACKET(tail) = packet;
        }
        tail = packet;
#else
        nbl = NB_NBL(packet);
        NBL_REF(nbl)--;
        if (!NBL_REF(nbl)) {
          NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
          if (head) {
            NET_BUFFER_LIST_NEXT_NBL(tail) = nbl;
            tail = nbl;
          } else {
            head = nbl;
            tail = nbl;
          }
        }
#endif
        shadow->packet = NULL;
        tx_packets++;
      }
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx_ring.rsp_cons = prod;
    /* resist the temptation to set the event more than +1... it breaks things */
    if (!dont_set_event)
      xi->tx_ring.sring->rsp_event = prod + 1;
    KeMemoryBarrier();
  } while (prod != xi->tx_ring.sring->rsp_prod);

  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* must be done without holding any locks */
#if NTDDI_VERSION < NTDDI_VISTA
  while (head) {
    packet = (PNDIS_PACKET)head;
    head = PACKET_NEXT_PACKET(packet);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
  }
#else
  if (head)
    NdisMSendNetBufferListsComplete(xi->adapter_handle, head, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
#endif

  /* must be done after we have truly given back all packets */
  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
  xi->tx_outstanding -= tx_packets;
  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  }
  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
}

#if NTDDI_VERSION < NTDDI_VISTA
VOID
XenNet_SendPackets(NDIS_HANDLE MiniportAdapterContext, PPNDIS_PACKET PacketArray, UINT NumberOfPackets) {
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL old_irql;

  if (xi->device_state != DEVICE_STATE_ACTIVE) {
    for (i = 0; i < NumberOfPackets; i++) {
      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
    }
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  for (i = 0; i < NumberOfPackets; i++) {
    packet = PacketArray[i];
    XN_ASSERT(packet);
    entry = &PACKET_LIST_ENTRY(packet);
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#else
// called at <= DISPATCH_LEVEL
VOID
XenNet_SendNetBufferLists(
    NDIS_HANDLE adapter_context,
    PNET_BUFFER_LIST nb_lists,
    NDIS_PORT_NUMBER port_number,
    ULONG send_flags) {
  struct xennet_info *xi = adapter_context;
  PLIST_ENTRY nb_entry;
  KIRQL old_irql;
  PNET_BUFFER_LIST curr_nbl;
  PNET_BUFFER_LIST next_nbl;

  UNREFERENCED_PARAMETER(port_number);

  if (xi->device_state == DEVICE_STATE_INACTIVE) {
    for (curr_nbl = nb_lists; curr_nbl; curr_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl)) {
      curr_nbl->Status = NDIS_STATUS_FAILURE;
    }
    /* this actions the whole list */
    NdisMSendNetBufferListsComplete(xi->adapter_handle, nb_lists, (send_flags & NDIS_SEND_FLAGS_DISPATCH_LEVEL) ? NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL : 0);
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  for (curr_nbl = nb_lists; curr_nbl; curr_nbl = next_nbl) {
    PNET_BUFFER curr_nb;
    NBL_REF(curr_nbl) = 0;
    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    NET_BUFFER_LIST_NEXT_NBL(curr_nbl) = NULL;
    for (curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl); curr_nb; curr_nb = NET_BUFFER_NEXT_NB(curr_nb)) {
      NB_NBL(curr_nb) = curr_nbl;
      nb_entry = &NB_LIST_ENTRY(curr_nb);
      InsertTailList(&xi->tx_waiting_pkt_list, nb_entry);
      NBL_REF(curr_nbl)++;
    }
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#endif

VOID
XenNet_CancelSend(NDIS_HANDLE adapter_context, PVOID cancel_id)
{
  UNREFERENCED_PARAMETER(adapter_context);
  UNREFERENCED_PARAMETER(cancel_id);
  FUNCTION_ENTER();

  FUNCTION_EXIT();
}
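
/*
 * Set up TX state: the lock, the waiting-packet queue, the idle event used
 * during shutdown, a lookaside list of PAGE_SIZE coalesce buffers, and the
 * id freelist with every shadow entry marked free (no grant, no buffer).
 */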
BOOLEAN
XenNet_TxInit(xennet_info_t *xi) {
  USHORT i;

  KeInitializeSpinLock(&xi->tx_lock);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;

  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
    PAGE_SIZE, XENNET_POOL_TAG, 0);

  xi->tx_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++) {
    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
    xi->tx_shadows[i].cb = NULL;
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}

/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi) {
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
  PNET_BUFFER_LIST nbl;
#endif
  PLIST_ENTRY entry;
  LARGE_INTEGER timeout;
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  while (xi->tx_outstanding) {
    KeReleaseSpinLock(&xi->tx_lock, old_irql);
    FUNCTION_MSG("Waiting for %d remaining packets to be sent\n", xi->tx_outstanding);
    timeout.QuadPart = -1 * 1 * 1000 * 1000 * 10; /* 1 second, relative, in 100ns units */
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, &timeout);
    KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  }
  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  /* Free packets in tx queue */
  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
    nbl = NB_NBL(packet);
    NBL_REF(nbl)--;
    if (!NBL_REF(nbl)) {
      nbl->Status = NDIS_STATUS_FAILURE;
      NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
    }
#endif
  }
  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);

  FUNCTION_EXIT();

  return TRUE;
}