win-pvdrivers

view xennet/xennet_tx.c @ 821:9c0c4210b778

Fix xennet build under Windows 2000
Fix xenvbd install under Windows 2000
author James Harper <james.harper@bendigoit.com.au>
date Sat Oct 16 20:03:30 2010 +1100 (2010-10-16)
parents 467005e7f509
children 254b8424e23b
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"

/* Not really necessary but keeps PREfast happy */
#if (NTDDI_VERSION >= NTDDI_WINXP)
static KDEFERRED_ROUTINE XenNet_TxBufferGC;
#endif
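
/*
 * The tx id freelist is a simple LIFO stack of ids; each id indexes both
 * tx_shadows[] and the requests we place on the tx ring. Callers hold
 * tx_lock, so no further synchronisation is needed here.
 */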
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}

#define SWAP_USHORT(x) (USHORT)(((((x) & 0xFF) << 8) | (((x) >> 8) & 0xFF)))
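
/*
 * Put a single coalesce buffer on the tx ring: take a ring slot and a
 * shadow id, grant the backend access to the buffer's page, and record
 * the buffer and grant in tx_shadows so XenNet_TxBufferGC can release
 * them when the response comes back.
 */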
static __forceinline struct netif_tx_request *
XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
{
  struct netif_tx_request *tx;
  tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
  xi->tx.req_prod_pvt++;
  xi->tx_ring_free--;
  tx->id = get_id_from_freelist(xi);
  ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
  ASSERT(!xi->tx_shadows[tx->id].cb);
  xi->tx_shadows[tx->id].cb = coalesce_buf;
  tx->gref = xi->vectors.GntTbl_GrantAccess(xi->vectors.context, 0, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
  xi->tx_shadows[tx->id].gref = tx->gref;
  tx->offset = 0;
  tx->size = (USHORT)length;
  ASSERT(tx->offset + tx->size <= PAGE_SIZE);
  ASSERT(tx->size);
  return tx;
}

/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
 */
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
{
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  ULONG mss = 0;
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  ULONG remaining;
  ULONG parse_result;
  ULONG frags = 0;
  BOOLEAN coalesce_required = FALSE;
  PVOID coalesce_buf;
  ULONG coalesce_remaining = 0;
  grant_ref_t gref;
  ULONG tx_length = 0;

  //FUNCTION_ENTER();
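
  /*
   * Reserve a grant ref and a coalesce buffer up front; the packet header
   * is always copied into a coalesce buffer (via XenNet_ParsePacketHeader
   * below) so it can be parsed in one contiguous piece.
   */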
  gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
  if (gref == INVALID_GRANT_REF)
  {
    KdPrint((__DRIVER_NAME " out of grefs\n"));
    return FALSE;
  }
  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
  if (!coalesce_buf)
  {
    xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
    KdPrint((__DRIVER_NAME " out of memory\n"));
    return FALSE;
  }
  XenNet_ClearPacketInfo(&pi);
  NdisQueryPacket(packet, NULL, (PUINT)&pi.mdl_count, &pi.first_buffer, (PUINT)&pi.total_length);

  pi.curr_mdl_offset = 0;
  pi.curr_buffer = pi.first_buffer;
  remaining = min(pi.total_length, PAGE_SIZE);
  while (remaining) /* this much gets put in the header */
  {
    ULONG length = XenNet_QueryData(&pi, remaining);
    remaining -= length;
    XenNet_EatData(&pi, length);
  }
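
  /*
   * Count the header buffer as the first frag, then walk the rest of the
   * packet to see how many page-sized frags the backend would be asked
   * for; if that exceeds LINUX_MAX_SG_ELEMENTS we coalesce below.
   */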
  frags++;
  if (pi.total_length > PAGE_SIZE) /* these are the frags we care about */
  {
    remaining = pi.total_length - PAGE_SIZE;
    while (remaining)
    {
      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
      if (length != 0)
      {
        frags++;
        if (frags > LINUX_MAX_SG_ELEMENTS)
          break; /* worst case there could be hundreds of fragments - leave the loop now */
      }
      remaining -= length;
      XenNet_EatData(&pi, length);
    }
  }
  if (frags > LINUX_MAX_SG_ELEMENTS)
  {
    frags = LINUX_MAX_SG_ELEMENTS;
    coalesce_required = TRUE;
  }

  /* if we have enough space on the ring then we have enough ids, so there is no need to check for those separately */
  if (xi->tx_ring_free < frags + 1)
  {
    xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
    //KdPrint((__DRIVER_NAME " Full on send - ring full\n"));
    return FALSE;
  }

  parse_result = XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
  remaining = pi.total_length - pi.header_length;
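
  /*
   * Translate NDIS checksum-offload requests into Xen tx flags. Requests
   * for offloads that are not enabled in our settings are logged but the
   * packet is still sent (the commented-out failure paths show the
   * stricter alternative).
   */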
  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP)
  {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4)
    {
      if (csum_info->Transmit.NdisPacketIpChecksum && !xi->setting_csum.V4Transmit.IpChecksum)
      {
        KdPrint((__DRIVER_NAME " IpChecksum not enabled\n"));
        //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
        //return TRUE;
      }
      if (csum_info->Transmit.NdisPacketTcpChecksum)
      {
        if (xi->setting_csum.V4Transmit.TcpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " TcpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
      else if (csum_info->Transmit.NdisPacketUdpChecksum)
      {
        if (xi->setting_csum.V4Transmit.UdpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " UdpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
    }
    else if (csum_info->Transmit.NdisPacketChecksumV6)
    {
      KdPrint((__DRIVER_NAME " NdisPacketChecksumV6 not supported\n"));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //return TRUE;
    }
  }

  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));

  if (mss && parse_result == PARSE_OK)
  {
    if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) != NDIS_PROTOCOL_ID_TCP_IP)
    {
      KdPrint((__DRIVER_NAME " mss specified when packet is not NDIS_PROTOCOL_ID_TCP_IP\n"));
    }
    ndis_lso = TRUE;
    if (mss > xi->setting_max_offload)
    {
      KdPrint((__DRIVER_NAME " Requested MSS (%d) larger than allowed MSS (%d)\n", mss, xi->setting_max_offload));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //FUNCTION_EXIT();
      return TRUE;
    }
  }

  if (ndis_lso)
  {
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss)
    {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
    else
    {
      KdPrint((__DRIVER_NAME " large send specified when tcp_length < mss\n"));
    }
  }

  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */

  /* (A) */
  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
  ASSERT(tx0); /* can't fail - ring space and ids were checked above */
  tx0->flags = flags;
  tx_length += pi.header_length;

  /* even though we haven't reported that we are capable of it, LSO demands that we calculate the IP Header checksum */
  if (ndis_lso)
  {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
  txN = tx0;

  /* (B) */
  if (xen_gso)
  {
    ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx.req_prod_pvt));
    xi->tx.req_prod_pvt++;
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
  }

  ASSERT(xi->config_sg || !remaining);

  /* (C) - only if data is remaining */
  coalesce_buf = NULL;
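
  /*
   * Walk the rest of the MDL chain. On the coalesce path we copy data
   * into fresh page-sized coalesce buffers; otherwise we grant the
   * backend direct access to each MDL page. Either way, each iteration
   * adds at most one request to the ring.
   */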
  while (remaining > 0)
  {
    ULONG length;
    PFN_NUMBER pfn;

    ASSERT(pi.curr_buffer);
    if (coalesce_required)
    {
      PVOID va;
      if (!coalesce_buf)
      {
        gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
        if (gref == INVALID_GRANT_REF)
        {
          KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
          break;
        }
        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
        if (!coalesce_buf)
        {
          xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
          KdPrint((__DRIVER_NAME " out of memory - partial send\n"));
          break;
        }
        coalesce_remaining = min(PAGE_SIZE, remaining);
      }
      length = XenNet_QueryData(&pi, coalesce_remaining);
      va = NdisBufferVirtualAddressSafe(pi.curr_buffer, LowPagePriority);
      if (!va)
      {
        /* give the gref and coalesce buffer back and bail out, matching the other partial-send paths; leaving the freed buffer in coalesce_buf would put a stale pointer on the ring */
        KdPrint((__DRIVER_NAME " failed to map buffer va - partial send\n"));
        xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
        break;
      }
      else
      {
        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
        coalesce_remaining -= length;
      }
    }
    else
    {
      length = XenNet_QueryData(&pi, PAGE_SIZE);
    }
    if (!length || coalesce_remaining) /* sometimes there are zero length buffers... */
    {
      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
      continue;
    }

    if (coalesce_buf)
    {
      if (remaining)
      {
        /* take the chunk size before adjusting 'remaining', otherwise tx_length is under-counted */
        ULONG cb_length = min(PAGE_SIZE, remaining);
        txN = XenNet_PutCbOnRing(xi, coalesce_buf, cb_length, gref);
        ASSERT(txN);
        coalesce_buf = NULL;
        remaining -= cb_length;
        tx_length += cb_length;
      }
    }
    else
    {
      ULONG offset;

      gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
      if (gref == INVALID_GRANT_REF)
      {
        KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
        break;
      }
      txN = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
      xi->tx.req_prod_pvt++;
      xi->tx_ring_free--;
      txN->id = get_id_from_freelist(xi);
      ASSERT(!xi->tx_shadows[txN->id].cb);
      offset = MmGetMdlByteOffset(pi.curr_buffer) + pi.curr_mdl_offset;
      pfn = MmGetMdlPfnArray(pi.curr_buffer)[offset >> PAGE_SHIFT];
      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
      txN->gref = xi->vectors.GntTbl_GrantAccess(xi->vectors.context, 0, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
      ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
      xi->tx_shadows[txN->id].gref = txN->gref;
      //ASSERT(sg->Elements[sg_element].Length > sg_offset);
      txN->size = (USHORT)length;
      ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      ASSERT(txN->size);
      ASSERT(txN->gref != INVALID_GRANT_REF);
      remaining -= length;
      tx_length += length;
    }
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    XenNet_EatData(&pi, length);
  }
  txN->flags &= ~NETTXF_more_data;
  ASSERT(tx0->size == pi.total_length);
  ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;

  if (ndis_lso)
  {
    //KdPrint((__DRIVER_NAME " TcpLargeSendPacketInfo = %d\n", pi.tcp_length));
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
  }

  xi->stat_tx_ok++;

  //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  //FUNCTION_EXIT();
  xi->tx_outstanding++;
  return TRUE;
}

/* Called at DISPATCH_LEVEL with tx_lock held */
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  int notify;

  //FUNCTION_ENTER();

  if (xi->device_state->suspend_resume_state_pdo != SR_STATE_RUNNING)
    return;

  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  /* if the list is empty, RemoveHeadList returns the list head itself, not NULL */
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    if (!XenNet_HWSendPacket(xi, packet))
    {
      //KdPrint((__DRIVER_NAME " No room for packet\n"));
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
  if (notify)
  {
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->event_channel);
  }
  //FUNCTION_EXIT();
}

//ULONG packets_outstanding = 0;
// Called at DISPATCH_LEVEL
static VOID
XenNet_TxBufferGC(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
{
  struct xennet_info *xi = context;
  RING_IDX cons, prod;
  PNDIS_PACKET head = NULL, tail = NULL;
  PNDIS_PACKET packet;
  ULONG tx_packets = 0;

  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  //FUNCTION_ENTER();

  if (!xi->connected)
    return; /* a delayed DPC could let this come through... just do nothing */
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->tx_shutting_down && !xi->tx_outstanding)
  {
    /* there is a chance that our DPC was queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }
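
  /*
   * Standard ring-consumer loop: process responses up to rsp_prod, ask
   * the backend (via rsp_event) to notify us of the next response, then
   * re-check rsp_prod to close the race with responses that arrived in
   * the meantime.
   */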
  do {
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      tx_shadow_t *shadow;

      txrsp = RING_GET_RESPONSE(&xi->tx, cons);

      xi->tx_ring_free++;

      if (txrsp->status == NETIF_RSP_NULL)
      {
        continue;
      }

      shadow = &xi->tx_shadows[txrsp->id];
      if (shadow->cb)
      {
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
        shadow->cb = NULL;
      }

      if (shadow->gref != INVALID_GRANT_REF)
      {
        xi->vectors.GntTbl_EndAccess(xi->vectors.context,
          shadow->gref, FALSE, (ULONG)'XNTX');
        shadow->gref = INVALID_GRANT_REF;
      }

      if (shadow->packet)
      {
        packet = shadow->packet;
        *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
        if (head)
          *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
        else
          head = packet;
        tail = packet;
        shadow->packet = NULL;
      }
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx.rsp_cons = prod;
    /* resist the temptation to set the event more than +1... it breaks things */
    xi->tx.sring->rsp_event = prod + 1;
    KeMemoryBarrier();
  } while (prod != xi->tx.sring->rsp_prod);

  /* if queued packets, send them now */
  if (!xi->tx_shutting_down)
    XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* must be done without holding any locks */
  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
    tx_packets++;
  }

  /* must be done after we have truly given back all packets */
  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
  xi->tx_outstanding -= tx_packets;
  if (!xi->tx_outstanding && xi->tx_shutting_down)
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  if (xi->device_state->suspend_resume_state_pdo == SR_STATE_SUSPENDING
    && xi->device_state->suspend_resume_state_fdo != SR_STATE_SUSPENDING
    && xi->tx_id_free == NET_TX_RING_SIZE)
  {
    KdPrint((__DRIVER_NAME " Setting SR_STATE_SUSPENDING\n"));
    xi->device_state->suspend_resume_state_fdo = SR_STATE_SUSPENDING;
    KdPrint((__DRIVER_NAME " Notifying event channel %d\n", xi->device_state->pdo_event_channel));
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
  }

  //FUNCTION_EXIT();
}

// called at <= DISPATCH_LEVEL
VOID DDKAPI
XenNet_SendPackets(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN PPNDIS_PACKET PacketArray,
  IN UINT NumberOfPackets
  )
{
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL OldIrql;

  //FUNCTION_ENTER();

  if (xi->inactive)
  {
    for (i = 0; i < NumberOfPackets; i++)
    {
      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
    }
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
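
  /*
   * MiniportReservedEx layout: the PVOID at offset 0 links completed
   * packets into a singly linked list (see XenNet_TxBufferGC and
   * XenNet_CancelSendPackets), and the LIST_ENTRY at offset sizeof(PVOID)
   * queues the packet while it waits for ring space.
   */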
  for (i = 0; i < NumberOfPackets; i++)
  {
    packet = PacketArray[i];
    ASSERT(packet);
    /* clear the whole completion-list pointer, not just its first ULONG */
    *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
    entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  //FUNCTION_EXIT();
}

VOID
XenNet_CancelSendPackets(
  NDIS_HANDLE MiniportAdapterContext,
  PVOID CancelId)
{
  struct xennet_info *xi = MiniportAdapterContext;
  KIRQL old_irql;
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  PNDIS_PACKET head = NULL, tail = NULL;

  FUNCTION_ENTER();
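
  /*
   * Unlink every waiting packet whose cancel id matches, collect them on
   * a private list, then complete them with NDIS_STATUS_REQUEST_ABORTED
   * outside the lock.
   */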
  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  entry = xi->tx_waiting_pkt_list.Flink;
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    entry = entry->Flink;
    if (NDIS_GET_PACKET_CANCEL_ID(packet) == CancelId)
    {
      KdPrint((__DRIVER_NAME " Found packet to cancel %p\n", packet));
      /* RemoveEntryList returns whether the list is now empty, not whether removal succeeded, so don't assert on it */
      RemoveEntryList((PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)]);
      *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
      if (head)
        *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
      else
        head = packet;
      tail = packet;
    }
  }

  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    KdPrint((__DRIVER_NAME " NdisMSendComplete(%p)\n", packet));
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_REQUEST_ABORTED);
  }

  FUNCTION_EXIT();
}

VOID
XenNet_TxResumeStart(xennet_info_t *xi)
{
  UNREFERENCED_PARAMETER(xi);

  FUNCTION_ENTER();
  /* nothing to do here - all packets were already sent */
  FUNCTION_EXIT();
}

VOID
XenNet_TxResumeEnd(xennet_info_t *xi)
{
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  XenNet_SendQueuedPackets(xi);
  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  FUNCTION_EXIT();
}

BOOLEAN
XenNet_TxInit(xennet_info_t *xi)
{
  USHORT i;

  KeInitializeSpinLock(&xi->tx_lock);
  KeInitializeDpc(&xi->tx_dpc, XenNet_TxBufferGC, xi);
  /* DPCs are only serialised when targeted at a single processor */
  KeSetTargetProcessorDpc(&xi->tx_dpc, 0);
  //KeSetImportanceDpc(&xi->tx_dpc, HighImportance);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_shutting_down = FALSE;
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;
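
  /* coalesce buffers are page-sized, so the lookaside list hands out PAGE_SIZE blocks */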
  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
    PAGE_SIZE, XENNET_POOL_TAG, 0);

  xi->tx_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
    xi->tx_shadows[i].cb = NULL;
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}

/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  //PMDL mdl;
  //ULONG i;
  KIRQL OldIrql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
  xi->tx_shutting_down = TRUE;
  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  while (xi->tx_outstanding)
  {
    KdPrint((__DRIVER_NAME " Waiting for %d remaining packets to be sent\n", xi->tx_outstanding));
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, NULL);
  }
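
  /*
   * Make sure the GC DPC is neither queued nor running before tearing
   * anything down. KeFlushQueuedDpcs was introduced in Windows XP, hence
   * the NTDDI guard (see the "Fix xennet build under Windows 2000"
   * changeset comment above).
   */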
  KeRemoveQueueDpc(&xi->tx_dpc);
#if (NTDDI_VERSION >= NTDDI_WINXP)
  KeFlushQueuedDpcs();
#endif

  /* Free packets in tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);

  FUNCTION_EXIT();

  return TRUE;
}