win-pvdrivers

view xennet/xennet_tx.c @ 790:467005e7f509

Big messy changes. Add grant ref tagging to better track when things go wrong (debug build only).
Fix a race in xennet that causes crashes under heavy traffic conditions on driver shutdown.
author James Harper <james.harper@bendigoit.com.au>
date Fri Mar 12 09:38:42 2010 +1100 (2010-03-12)
parents 6304eb6bb690
children 9c0c4210b778
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"

/* Not really necessary but keeps PREfast happy */
static KDEFERRED_ROUTINE XenNet_TxBufferGC;
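
/*
 * The tx shadow ids live on a simple stack: tx_id_list holds the free ids
 * and tx_id_free is the stack depth. Both helpers are only used from the tx
 * path, which is presumed to run under tx_lock.
 */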
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}

#define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))
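
/*
 * Put a coalesce buffer on the tx ring: take a free id, grant the backend
 * access to the buffer's page, and fill in the request. The caller has
 * already reserved the grant ref and, via tx_ring_free, the ring slot.
 */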
static __forceinline struct netif_tx_request *
XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
{
  struct netif_tx_request *tx;
  tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
  xi->tx.req_prod_pvt++;
  xi->tx_ring_free--;
  tx->id = get_id_from_freelist(xi);
  ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
  ASSERT(!xi->tx_shadows[tx->id].cb);
  xi->tx_shadows[tx->id].cb = coalesce_buf;
  tx->gref = xi->vectors.GntTbl_GrantAccess(xi->vectors.context, 0, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
  xi->tx_shadows[tx->id].gref = tx->gref;
  tx->offset = 0;
  tx->size = (USHORT)length;
  ASSERT(tx->offset + tx->size <= PAGE_SIZE);
  ASSERT(tx->size);
  return tx;
}

/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
 */
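/*
 * Rough flow, as implemented below: reserve a grant ref and a coalesce
 * buffer up front, walk the MDL chain to count fragments, give up (so the
 * caller requeues) if the ring is too full, parse the header into the
 * coalesce buffer, then emit requests in the (A)/(B)/(C) order described
 * further down.
 */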
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
{
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  ULONG mss = 0;
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  ULONG remaining;
  ULONG parse_result;
  ULONG frags = 0;
  BOOLEAN coalesce_required = FALSE;
  PVOID coalesce_buf;
  ULONG coalesce_remaining = 0;
  grant_ref_t gref;
  ULONG tx_length = 0;

  //FUNCTION_ENTER();

  gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
  if (gref == INVALID_GRANT_REF)
  {
    KdPrint((__DRIVER_NAME " out of grefs\n"));
    return FALSE;
  }
  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
  if (!coalesce_buf)
  {
    xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
    KdPrint((__DRIVER_NAME " out of memory\n"));
    return FALSE;
  }
  XenNet_ClearPacketInfo(&pi);
  NdisQueryPacket(packet, NULL, (PUINT)&pi.mdl_count, &pi.first_buffer, (PUINT)&pi.total_length);

  pi.curr_mdl_offset = 0;
  pi.curr_buffer = pi.first_buffer;
  remaining = min(pi.total_length, PAGE_SIZE);
  while (remaining) /* this much gets put in the header */
  {
    ULONG length = XenNet_QueryData(&pi, remaining);
    remaining -= length;
    XenNet_EatData(&pi, length);
  }
  frags++;
  if (pi.total_length > PAGE_SIZE) /* these are the frags we care about */
  {
    remaining = pi.total_length - PAGE_SIZE;
    while (remaining)
    {
      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
      if (length != 0)
      {
        frags++;
        if (frags > LINUX_MAX_SG_ELEMENTS)
          break; /* worst case there could be hundreds of fragments - leave the loop now */
      }
      remaining -= length;
      XenNet_EatData(&pi, length);
    }
  }
  if (frags > LINUX_MAX_SG_ELEMENTS)
  {
    frags = LINUX_MAX_SG_ELEMENTS;
    coalesce_required = TRUE;
  }
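
  /*
   * frags now counts one page-sized chunk for the header plus one chunk per
   * remaining page. Anything beyond what the Linux backend will accept gets
   * coalesced into fresh page buffers in the send loop below, rather than
   * being granted in place.
   */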
  /* if we have enough space on the ring then we have enough id's so no need to check for that */
  if (xi->tx_ring_free < frags + 1)
  {
    xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
    //KdPrint((__DRIVER_NAME " Full on send - ring full\n"));
    return FALSE;
  }

  parse_result = XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
  remaining = pi.total_length - pi.header_length;
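
  /*
   * Translate the NDIS checksum offload request into Xen tx flags below:
   * NETTXF_csum_blank marks the checksum field as still to be computed and
   * NETTXF_data_validated tells the backend the data needs no further
   * checking. Only IPv4 TCP/UDP offload is handled.
   */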
  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP)
  {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4)
    {
      if (csum_info->Transmit.NdisPacketIpChecksum && !xi->setting_csum.V4Transmit.IpChecksum)
      {
        KdPrint((__DRIVER_NAME " IpChecksum not enabled\n"));
        //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
        //return TRUE;
      }
      if (csum_info->Transmit.NdisPacketTcpChecksum)
      {
        if (xi->setting_csum.V4Transmit.TcpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " TcpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
      else if (csum_info->Transmit.NdisPacketUdpChecksum)
      {
        if (xi->setting_csum.V4Transmit.UdpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " UdpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
    }
    else if (csum_info->Transmit.NdisPacketChecksumV6)
    {
      KdPrint((__DRIVER_NAME " NdisPacketChecksumV6 not supported\n"));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //return TRUE;
    }
  }

  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));

  if (mss && parse_result == PARSE_OK)
  {
    if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) != NDIS_PROTOCOL_ID_TCP_IP)
    {
      KdPrint((__DRIVER_NAME " mss specified when packet is not NDIS_PROTOCOL_ID_TCP_IP\n"));
    }
    ndis_lso = TRUE;
    if (mss > xi->setting_max_offload)
    {
      KdPrint((__DRIVER_NAME " Requested MSS (%d) larger than allowed MSS (%d)\n", mss, xi->setting_max_offload));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //FUNCTION_EXIT();
      return TRUE;
    }
  }

  if (ndis_lso)
  {
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss)
    {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
    else
    {
      KdPrint((__DRIVER_NAME " large send specified when tcp_length < mss\n"));
    }
  }

  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */
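  /*
   * For an LSO packet the ring thus ends up looking like (illustrative):
   *   slot n      tx0 - header, csum flags, NETTXF_extra_info set
   *   slot n+1    ei  - XEN_NETIF_EXTRA_TYPE_GSO carrying the mss
   *   slot n+2... txN - data frags, NETTXF_more_data on all but the last
   */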

  /* (A) */
  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
  ASSERT(tx0); /* ring space was reserved above, so this cannot fail */
  tx0->flags = flags;
  tx_length += pi.header_length;

  /* even though we haven't reported that we are capable of it, LSO demands that we calculate the IP Header checksum */
  if (ndis_lso)
  {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
  txN = tx0;

  /* (B) */
  if (xen_gso)
  {
    ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx.req_prod_pvt));
    xi->tx.req_prod_pvt++;
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
  }

  ASSERT(xi->config_sg || !remaining);

  /* (C) - only if data is remaining */
  coalesce_buf = NULL;
  while (remaining > 0)
  {
    ULONG length;
    PFN_NUMBER pfn;

    ASSERT(pi.curr_buffer);
    if (coalesce_required)
    {
      PVOID va;
      if (!coalesce_buf)
      {
        gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
        if (gref == INVALID_GRANT_REF)
        {
          KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
          break;
        }
        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
        if (!coalesce_buf)
        {
          xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
          KdPrint((__DRIVER_NAME " out of memory - partial send\n"));
          break;
        }
        coalesce_remaining = min(PAGE_SIZE, remaining);
      }
      length = XenNet_QueryData(&pi, coalesce_remaining);
      va = NdisBufferVirtualAddressSafe(pi.curr_buffer, LowPagePriority);
      if (!va)
      {
        KdPrint((__DRIVER_NAME " failed to map buffer va - partial send\n"));
        coalesce_remaining = 0;
        remaining -= min(PAGE_SIZE, remaining);
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
        /* the buffer is gone: return the unused gref, clear the stale pointer
           so it can't reach the ring below, and skip this chunk entirely */
        xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
        coalesce_buf = NULL;
        XenNet_EatData(&pi, length);
        continue;
      }
      else
      {
        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
        coalesce_remaining -= length;
      }
    }
    else
    {
      length = XenNet_QueryData(&pi, PAGE_SIZE);
    }
    if (!length || coalesce_remaining) /* sometimes there are zero length buffers... */
    {
      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
      continue;
    }

    if (coalesce_buf)
    {
      if (remaining)
      {
        txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
        ASSERT(txN);
        coalesce_buf = NULL;
        /* count the chunk before shrinking remaining, or tx_length is wrong */
        tx_length += min(PAGE_SIZE, remaining);
        remaining -= min(PAGE_SIZE, remaining);
      }
    }
    else
    {
      ULONG offset;

      gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
      if (gref == INVALID_GRANT_REF)
      {
        KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
        break;
      }
      txN = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
      xi->tx.req_prod_pvt++;
      xi->tx_ring_free--;
      txN->id = get_id_from_freelist(xi);
      ASSERT(!xi->tx_shadows[txN->id].cb);
      offset = MmGetMdlByteOffset(pi.curr_buffer) + pi.curr_mdl_offset;
      pfn = MmGetMdlPfnArray(pi.curr_buffer)[offset >> PAGE_SHIFT];
      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
      txN->gref = xi->vectors.GntTbl_GrantAccess(xi->vectors.context, 0, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
      ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
      xi->tx_shadows[txN->id].gref = txN->gref;
      //ASSERT(sg->Elements[sg_element].Length > sg_offset);
      txN->size = (USHORT)length;
      ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      ASSERT(txN->size);
      ASSERT(txN->gref != INVALID_GRANT_REF);
      remaining -= length;
      tx_length += length;
    }
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    XenNet_EatData(&pi, length);
  }
  txN->flags &= ~NETTXF_more_data;
  ASSERT(tx0->size == pi.total_length);
  ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;

  if (ndis_lso)
  {
    //KdPrint((__DRIVER_NAME " TcpLargeSendPacketInfo = %d\n", pi.tcp_length));
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
  }

  xi->stat_tx_ok++;

  //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  //FUNCTION_EXIT();
  xi->tx_outstanding++;
  return TRUE;
}

/* Called at DISPATCH_LEVEL with tx_lock held */
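/*
 * Drain tx_waiting_pkt_list onto the ring until XenNet_HWSendPacket reports
 * it is out of room (or grefs, or memory), then push the requests and notify
 * the event channel if the backend asked for it.
 */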
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  int notify;

  //FUNCTION_ENTER();

  if (xi->device_state->suspend_resume_state_pdo != SR_STATE_RUNNING)
    return;

  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  /* if the list is empty, RemoveHeadList returns a pointer to the list head itself, not NULL */
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    if (!XenNet_HWSendPacket(xi, packet))
    {
      //KdPrint((__DRIVER_NAME " No room for packet\n"));
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
  if (notify)
  {
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->event_channel);
  }
  //FUNCTION_EXIT();
}

//ULONG packets_outstanding = 0;
// Called at DISPATCH_LEVEL
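/*
 * Tx completion DPC: harvest responses from the ring, free each entry's
 * coalesce buffer, end grant access, chain completed packets together, and
 * complete them to NDIS only after dropping tx_lock. Also signals
 * tx_idle_event for the shutdown path once nothing remains outstanding.
 */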
static VOID
XenNet_TxBufferGC(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
{
  struct xennet_info *xi = context;
  RING_IDX cons, prod;
  PNDIS_PACKET head = NULL, tail = NULL;
  PNDIS_PACKET packet;
  ULONG tx_packets = 0;

  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  //FUNCTION_ENTER();

  if (!xi->connected)
    return; /* a delayed DPC could let this come through... just do nothing */
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->tx_shutting_down && !xi->tx_outstanding)
  {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }

  do {
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      tx_shadow_t *shadow;

      txrsp = RING_GET_RESPONSE(&xi->tx, cons);

      xi->tx_ring_free++;

      if (txrsp->status == NETIF_RSP_NULL)
      {
        continue;
      }

      shadow = &xi->tx_shadows[txrsp->id];
      if (shadow->cb)
      {
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
        shadow->cb = NULL;
      }

      if (shadow->gref != INVALID_GRANT_REF)
      {
        xi->vectors.GntTbl_EndAccess(xi->vectors.context,
          shadow->gref, FALSE, (ULONG)'XNTX');
        shadow->gref = INVALID_GRANT_REF;
      }

      if (shadow->packet)
      {
        packet = shadow->packet;
        *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
        if (head)
          *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
        else
          head = packet;
        tail = packet;
        shadow->packet = NULL;
      }
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx.rsp_cons = prod;
    /* resist the temptation to set the event more than +1... it breaks things */
    xi->tx.sring->rsp_event = prod + 1;
    KeMemoryBarrier();
  } while (prod != xi->tx.sring->rsp_prod);

  /* if queued packets, send them now */
  if (!xi->tx_shutting_down)
    XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* must be done without holding any locks */
  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
    tx_packets++;
  }

  /* must be done after we have truly given back all packets */
  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
  xi->tx_outstanding -= tx_packets;
  if (!xi->tx_outstanding && xi->tx_shutting_down)
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  if (xi->device_state->suspend_resume_state_pdo == SR_STATE_SUSPENDING
    && xi->device_state->suspend_resume_state_fdo != SR_STATE_SUSPENDING
    && xi->tx_id_free == NET_TX_RING_SIZE)
  {
    KdPrint((__DRIVER_NAME " Setting SR_STATE_SUSPENDING\n"));
    xi->device_state->suspend_resume_state_fdo = SR_STATE_SUSPENDING;
    KdPrint((__DRIVER_NAME " Notifying event channel %d\n", xi->device_state->pdo_event_channel));
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
  }

  //FUNCTION_EXIT();
}

// called at <= DISPATCH_LEVEL
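/*
 * NDIS send handler: queue each packet on tx_waiting_pkt_list (the
 * LIST_ENTRY is embedded in MiniportReservedEx) and try to push the whole
 * queue to the ring straight away.
 */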
VOID DDKAPI
XenNet_SendPackets(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN PPNDIS_PACKET PacketArray,
  IN UINT NumberOfPackets
  )
{
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL OldIrql;

  //FUNCTION_ENTER();

  if (xi->inactive)
  {
    for (i = 0; i < NumberOfPackets; i++)
    {
      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
    }
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  for (i = 0; i < NumberOfPackets; i++)
  {
    packet = PacketArray[i];
    ASSERT(packet);
    *(ULONG *)&packet->MiniportReservedEx = 0;
    entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  //FUNCTION_EXIT();
}
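
/*
 * Cancelling only needs to cover packets still on tx_waiting_pkt_list;
 * anything already on the ring will be completed by XenNet_TxBufferGC in
 * due course.
 */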
VOID
XenNet_CancelSendPackets(
  NDIS_HANDLE MiniportAdapterContext,
  PVOID CancelId)
{
  struct xennet_info *xi = MiniportAdapterContext;
  KIRQL old_irql;
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  PNDIS_PACKET head = NULL, tail = NULL;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  entry = xi->tx_waiting_pkt_list.Flink;
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    entry = entry->Flink;
    if (NDIS_GET_PACKET_CANCEL_ID(packet) == CancelId)
    {
      KdPrint((__DRIVER_NAME " Found packet to cancel %p\n", packet));
      /* RemoveEntryList's return value only indicates whether the list is now empty, so don't assert on it */
      RemoveEntryList((PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)]);
      *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
      if (head)
        *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
      else
        head = packet;
      tail = packet;
    }
  }

  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    KdPrint((__DRIVER_NAME " NdisMSendComplete(%p)\n", packet));
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_REQUEST_ABORTED);
  }

  FUNCTION_EXIT();
}

VOID
XenNet_TxResumeStart(xennet_info_t *xi)
{
  UNREFERENCED_PARAMETER(xi);

  FUNCTION_ENTER();
  /* nothing to do here - all packets were already sent */
  FUNCTION_EXIT();
}

VOID
XenNet_TxResumeEnd(xennet_info_t *xi)
{
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  XenNet_SendQueuedPackets(xi);
  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  FUNCTION_EXIT();
}
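
/*
 * One-time tx setup. The DPC is bound to CPU 0 so all tx completion runs
 * serialised on one processor; the id freelist is filled with every ring
 * slot; each shadow entry starts with no grant ref and no coalesce buffer.
 */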
BOOLEAN
XenNet_TxInit(xennet_info_t *xi)
{
  USHORT i;

  KeInitializeSpinLock(&xi->tx_lock);
  KeInitializeDpc(&xi->tx_dpc, XenNet_TxBufferGC, xi);
  /* DPCs are only serialised with respect to a single processor, so bind the tx DPC to CPU 0 */
  KeSetTargetProcessorDpc(&xi->tx_dpc, 0);
  //KeSetImportanceDpc(&xi->tx_dpc, HighImportance);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_shutting_down = FALSE;
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;

  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
    PAGE_SIZE, XENNET_POOL_TAG, 0);

  xi->tx_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
    xi->tx_shadows[i].cb = NULL;
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}

/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/
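
/*
 * This ordering is the shutdown race fix referred to in the commit message:
 * set tx_shutting_down under tx_lock, wait on tx_idle_event until the GC DPC
 * has completed every outstanding packet, and only then flush the DPC and
 * fail anything still sitting on the waiting list.
 */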
BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  //PMDL mdl;
  //ULONG i;
  KIRQL OldIrql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
  xi->tx_shutting_down = TRUE;
  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  while (xi->tx_outstanding)
  {
    KdPrint((__DRIVER_NAME " Waiting for %d remaining packets to be sent\n", xi->tx_outstanding));
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, NULL);
  }

  KeRemoveQueueDpc(&xi->tx_dpc);
  KeFlushQueuedDpcs();

  /* Free packets in tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);

  FUNCTION_EXIT();

  return TRUE;
}