win-pvdrivers

view xennet/xennet5_tx.c @ 1001:c21dd04d3ae6

Less noise for xennet6
author James Harper <james.harper@bendigoit.com.au>
date Fri Dec 14 21:26:22 2012 +1100 (2012-12-14)
parents 941699790045
children
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet5.h"
23 static USHORT
24 get_id_from_freelist(struct xennet_info *xi)
25 {
26 ASSERT(xi->tx_id_free);
27 xi->tx_id_free--;
29 return xi->tx_id_list[xi->tx_id_free];
30 }
32 static VOID
33 put_id_on_freelist(struct xennet_info *xi, USHORT id)
34 {
35 xi->tx_id_list[xi->tx_id_free] = id;
36 xi->tx_id_free++;
37 }
39 #define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))
41 static __forceinline struct netif_tx_request *
42 XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
43 {
44 struct netif_tx_request *tx;
45 tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
46 xi->tx.req_prod_pvt++;
47 xi->tx_ring_free--;
48 tx->id = get_id_from_freelist(xi);
49 ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
50 ASSERT(!xi->tx_shadows[tx->id].cb);
51 xi->tx_shadows[tx->id].cb = coalesce_buf;
52 tx->gref = xi->vectors.GntTbl_GrantAccess(xi->vectors.context, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
53 xi->tx_shadows[tx->id].gref = tx->gref;
54 tx->offset = 0;
55 tx->size = (USHORT)length;
56 ASSERT(tx->offset + tx->size <= PAGE_SIZE);
57 ASSERT(tx->size);
58 return tx;
59 }
61 /* Called at DISPATCH_LEVEL with tx_lock held */
62 /*
63 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
64 */
65 static BOOLEAN
66 XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
67 {
68 struct netif_tx_request *tx0 = NULL;
69 struct netif_tx_request *txN = NULL;
70 struct netif_extra_info *ei = NULL;
71 PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
72 ULONG mss = 0;
73 uint16_t flags = NETTXF_more_data;
74 packet_info_t pi;
75 BOOLEAN ndis_lso = FALSE;
76 BOOLEAN xen_gso = FALSE;
77 ULONG remaining;
78 ULONG parse_result;
79 ULONG frags = 0;
80 BOOLEAN coalesce_required = FALSE;
81 PVOID coalesce_buf;
82 ULONG coalesce_remaining = 0;
83 grant_ref_t gref;
84 ULONG tx_length = 0;
86 //FUNCTION_ENTER();
88 gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
89 if (gref == INVALID_GRANT_REF)
90 {
91 KdPrint((__DRIVER_NAME " out of grefs\n"));
92 return FALSE;
93 }
94 coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
95 if (!coalesce_buf)
96 {
97 xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
98 KdPrint((__DRIVER_NAME " out of memory\n"));
99 return FALSE;
100 }
101 XenNet_ClearPacketInfo(&pi);
102 NdisQueryPacket(packet, NULL, (PUINT)&pi.mdl_count, &pi.first_buffer, (PUINT)&pi.total_length);
104 pi.curr_mdl_offset = 0;
105 pi.curr_buffer = pi.first_buffer;
106 remaining = min(pi.total_length, PAGE_SIZE);
107 while (remaining) /* this much gets put in the header */
108 {
109 ULONG length = XenNet_QueryData(&pi, remaining);
110 remaining -= length;
111 XenNet_EatData(&pi, length);
112 }
113 frags++;
114 if (pi.total_length > PAGE_SIZE) /* these are the frags we care about */
115 {
116 remaining = pi.total_length - PAGE_SIZE;
117 while (remaining)
118 {
119 ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
120 if (length != 0)
121 {
122 frags++;
123 if (frags > LINUX_MAX_SG_ELEMENTS)
124 break; /* worst case there could be hundreds of fragments - leave the loop now */
125 }
126 remaining -= length;
127 XenNet_EatData(&pi, length);
128 }
129 }
130 if (frags > LINUX_MAX_SG_ELEMENTS)
131 {
132 frags = LINUX_MAX_SG_ELEMENTS;
133 coalesce_required = TRUE;
134 }
136 /* if we have enough space on the ring then we have enough id's so no need to check for that */
137 if (xi->tx_ring_free < frags + 1)
138 {
139 xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
140 NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
141 //KdPrint((__DRIVER_NAME " Full on send - ring full\n"));
142 return FALSE;
143 }
145 parse_result = XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
146 remaining = pi.total_length - pi.header_length;
148 if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP)
149 {
150 csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
151 packet, TcpIpChecksumPacketInfo);
152 if (csum_info->Transmit.NdisPacketChecksumV4)
153 {
154 if (csum_info->Transmit.NdisPacketIpChecksum && !xi->setting_csum.V4Transmit.IpChecksum)
155 {
156 KdPrint((__DRIVER_NAME " IpChecksum not enabled\n"));
157 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
158 //return TRUE;
159 }
160 if (csum_info->Transmit.NdisPacketTcpChecksum)
161 {
162 if (xi->setting_csum.V4Transmit.TcpChecksum)
163 {
164 flags |= NETTXF_csum_blank | NETTXF_data_validated;
165 }
166 else
167 {
168 KdPrint((__DRIVER_NAME " TcpChecksum not enabled\n"));
169 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
170 //return TRUE;
171 }
172 }
173 else if (csum_info->Transmit.NdisPacketUdpChecksum)
174 {
175 if (xi->setting_csum.V4Transmit.UdpChecksum)
176 {
177 flags |= NETTXF_csum_blank | NETTXF_data_validated;
178 }
179 else
180 {
181 KdPrint((__DRIVER_NAME " UdpChecksum not enabled\n"));
182 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
183 //return TRUE;
184 }
185 }
186 }
187 else if (csum_info->Transmit.NdisPacketChecksumV6)
188 {
189 KdPrint((__DRIVER_NAME " NdisPacketChecksumV6 not supported\n"));
190 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
191 //return TRUE;
192 }
193 }
195 mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
197 if (mss && parse_result == PARSE_OK)
198 {
199 if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) != NDIS_PROTOCOL_ID_TCP_IP)
200 {
201 KdPrint((__DRIVER_NAME " mss specified when packet is not NDIS_PROTOCOL_ID_TCP_IP\n"));
202 }
203 ndis_lso = TRUE;
204 if (mss > xi->setting_max_offload)
205 {
206 KdPrint((__DRIVER_NAME " Requested MSS (%d) larger than allowed MSS (%d)\n", mss, xi->setting_max_offload));
207 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
208 //FUNCTION_EXIT();
209 return TRUE;
210 }
211 }
213 if (ndis_lso)
214 {
215 flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used*/
216 if (pi.tcp_length >= mss)
217 {
218 flags |= NETTXF_extra_info;
219 xen_gso = TRUE;
220 }
221 else
222 {
223 KdPrint((__DRIVER_NAME " large send specified when tcp_length < mss\n"));
224 }
225 }
227 /*
228 * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
229 * (C) rest of requests on the ring. Only (A) has csum flags.
230 */
232 /* (A) */
233 tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
234 ASSERT(tx0); /* this will never happen */
235 tx0->flags = flags;
236 tx_length += pi.header_length;
238 /* even though we haven't reported that we are capable of it, LSO demands that we calculate the IP Header checksum */
239 if (ndis_lso)
240 {
241 XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
242 }
243 txN = tx0;
245 /* (B) */
246 if (xen_gso)
247 {
248 ASSERT(flags & NETTXF_extra_info);
249 ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
250 //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx.req_prod_pvt));
251 xi->tx.req_prod_pvt++;
252 xi->tx_ring_free--;
253 ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
254 ei->flags = 0;
255 ei->u.gso.size = (USHORT)mss;
256 ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
257 ei->u.gso.pad = 0;
258 ei->u.gso.features = 0;
259 }
261 ASSERT(xi->config_sg || !remaining);
263 /* (C) - only if data is remaining */
264 coalesce_buf = NULL;
265 while (remaining > 0)
266 {
267 ULONG length;
268 PFN_NUMBER pfn;
270 ASSERT(pi.curr_buffer);
271 if (coalesce_required)
272 {
273 PVOID va;
274 if (!coalesce_buf)
275 {
276 gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
277 if (gref == INVALID_GRANT_REF)
278 {
279 KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
280 break;
281 }
282 coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
283 if (!coalesce_buf)
284 {
285 xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
286 KdPrint((__DRIVER_NAME " out of memory - partial send\n"));
287 break;
288 }
289 coalesce_remaining = min(PAGE_SIZE, remaining);
290 }
291 length = XenNet_QueryData(&pi, coalesce_remaining);
292 va = NdisBufferVirtualAddressSafe(pi.curr_buffer, LowPagePriority);
293 if (!va)
294 {
295 KdPrint((__DRIVER_NAME " failed to map buffer va - partial send\n"));
296 coalesce_remaining = 0;
297 remaining -= min(PAGE_SIZE, remaining);
298 NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
299 }
300 else
301 {
302 memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
303 coalesce_remaining -= length;
304 }
305 }
306 else
307 {
308 length = XenNet_QueryData(&pi, PAGE_SIZE);
309 }
310 if (!length || coalesce_remaining) /* sometimes there are zero length buffers... */
311 {
312 XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
313 continue;
314 }
316 if (coalesce_buf)
317 {
318 if (remaining)
319 {
320 txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
321 ASSERT(txN);
322 coalesce_buf = NULL;
323 remaining -= min(PAGE_SIZE, remaining);
324 tx_length += min(PAGE_SIZE, remaining);
325 }
326 }
327 else
328 {
329 ULONG offset;
331 gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
332 if (gref == INVALID_GRANT_REF)
333 {
334 KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
335 break;
336 }
337 txN = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
338 xi->tx.req_prod_pvt++;
339 xi->tx_ring_free--;
340 txN->id = get_id_from_freelist(xi);
341 ASSERT(!xi->tx_shadows[txN->id].cb);
342 offset = MmGetMdlByteOffset(pi.curr_buffer) + pi.curr_mdl_offset;
343 pfn = MmGetMdlPfnArray(pi.curr_buffer)[offset >> PAGE_SHIFT];
344 txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
345 txN->gref = xi->vectors.GntTbl_GrantAccess(xi->vectors.context, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
346 ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
347 xi->tx_shadows[txN->id].gref = txN->gref;
348 //ASSERT(sg->Elements[sg_element].Length > sg_offset);
349 txN->size = (USHORT)length;
350 ASSERT(txN->offset + txN->size <= PAGE_SIZE);
351 ASSERT(txN->size);
352 ASSERT(txN->gref != INVALID_GRANT_REF);
353 remaining -= length;
354 tx_length += length;
355 }
356 tx0->size = tx0->size + txN->size;
357 txN->flags = NETTXF_more_data;
358 XenNet_EatData(&pi, length);
359 }
360 txN->flags &= ~NETTXF_more_data;
361 ASSERT(tx0->size == pi.total_length);
362 ASSERT(!xi->tx_shadows[txN->id].packet);
363 xi->tx_shadows[txN->id].packet = packet;
365 if (ndis_lso)
366 {
367 //KdPrint((__DRIVER_NAME " TcpLargeSendPacketInfo = %d\n", pi.tcp_length));
368 NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
369 }
371 xi->stat_tx_ok++;
373 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
374 //FUNCTION_EXIT();
375 xi->tx_outstanding++;
376 return TRUE;
377 }
379 /* Called at DISPATCH_LEVEL with tx_lock held */
381 static VOID
382 XenNet_SendQueuedPackets(struct xennet_info *xi)
383 {
384 PLIST_ENTRY entry;
385 PNDIS_PACKET packet;
386 int notify;
388 //FUNCTION_ENTER();
390 if (xi->device_state->suspend_resume_state_pdo != SR_STATE_RUNNING)
391 return;
393 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
394 /* if empty, the above returns head*, not NULL */
395 while (entry != &xi->tx_waiting_pkt_list)
396 {
397 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
398 if (!XenNet_HWSendPacket(xi, packet))
399 {
400 //KdPrint((__DRIVER_NAME " No room for packet\n"));
401 InsertHeadList(&xi->tx_waiting_pkt_list, entry);
402 break;
403 }
404 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
405 }
407 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
408 if (notify)
409 {
410 xi->vectors.EvtChn_Notify(xi->vectors.context, xi->event_channel);
411 }
412 //FUNCTION_EXIT();
413 }
415 //ULONG packets_outstanding = 0;
416 // Called at DISPATCH_LEVEL
/* Reap TX completions from the shared ring: for each response, free the
   coalesce buffer and grant ref recorded in the shadow entry, recycle the
   request id, and collect any completed NDIS packet. Completed packets are
   chained through MiniportReservedEx[0] and handed back to NDIS outside the
   lock. Also restarts the send queue and handles suspend/shutdown signalling. */
417 VOID
418 XenNet_TxBufferGC(struct xennet_info *xi, BOOLEAN dont_set_event)
419 {
420 RING_IDX cons, prod;
421 PNDIS_PACKET head = NULL, tail = NULL;
422 PNDIS_PACKET packet;
423 ULONG tx_packets = 0;
425 //FUNCTION_ENTER();
427 if (!xi->connected)
428 return; /* a delayed DPC could let this come through... just do nothing */
429 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
431 KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
433 if (xi->tx_shutting_down && !xi->tx_outstanding)
434 {
435 /* there is a chance that our Dpc had been queued just before the shutdown... */
436 KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
437 KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
438 return;
439 }
/* outer loop re-checks rsp_prod so responses that arrive while we are
   processing are picked up without waiting for another event */
441 do {
442 prod = xi->tx.sring->rsp_prod;
443 KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */
445 for (cons = xi->tx.rsp_cons; cons != prod; cons++)
446 {
447 struct netif_tx_response *txrsp;
448 tx_shadow_t *shadow;
450 txrsp = RING_GET_RESPONSE(&xi->tx, cons);
/* every response frees one ring slot, even NETIF_RSP_NULL ones */
452 xi->tx_ring_free++;
/* NETIF_RSP_NULL slots (e.g. extra_info entries) carry no id/resources */
454 if (txrsp->status == NETIF_RSP_NULL)
455 {
456 continue;
457 }
/* release the per-request resources recorded at submit time */
459 shadow = &xi->tx_shadows[txrsp->id];
460 if (shadow->cb)
461 {
462 NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
463 shadow->cb = NULL;
464 }
466 if (shadow->gref != INVALID_GRANT_REF)
467 {
468 xi->vectors.GntTbl_EndAccess(xi->vectors.context,
469 shadow->gref, FALSE, (ULONG)'XNTX');
470 shadow->gref = INVALID_GRANT_REF;
471 }
/* only the last request of a packet has shadow->packet set; chain the
   completed packet through MiniportReservedEx[0] for completion below */
473 if (shadow->packet)
474 {
475 packet = shadow->packet;
476 *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
477 if (head)
478 *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
479 else
480 head = packet;
481 tail = packet;
482 shadow->packet = NULL;
483 }
484 put_id_on_freelist(xi, txrsp->id);
485 }
487 xi->tx.rsp_cons = prod;
488 /* resist the temptation to set the event more than +1... it breaks things */
489 if (!dont_set_event)
490 xi->tx.sring->rsp_event = prod + 1;
491 KeMemoryBarrier();
492 } while (prod != xi->tx.sring->rsp_prod);
494 /* if queued packets, send them now */
495 if (!xi->tx_shutting_down)
496 XenNet_SendQueuedPackets(xi);
498 KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
500 /* must be done without holding any locks */
501 while (head)
502 {
503 packet = (PNDIS_PACKET)head;
504 head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
505 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
506 tx_packets++;
507 }
509 /* must be done after we have truly given back all packets */
510 KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
511 xi->tx_outstanding -= tx_packets;
/* wake XenNet_TxShutdown once the last outstanding packet is completed */
512 if (!xi->tx_outstanding && xi->tx_shutting_down)
513 KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
514 KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
/* tx_id_free == NET_TX_RING_SIZE means the ring is fully drained, so the
   suspend handshake with the PDO can proceed */
516 if (xi->device_state->suspend_resume_state_pdo == SR_STATE_SUSPENDING
517 && xi->device_state->suspend_resume_state_fdo != SR_STATE_SUSPENDING
518 && xi->tx_id_free == NET_TX_RING_SIZE)
519 {
520 KdPrint((__DRIVER_NAME " Setting SR_STATE_SUSPENDING\n"));
521 xi->device_state->suspend_resume_state_fdo = SR_STATE_SUSPENDING;
522 KdPrint((__DRIVER_NAME " Notifying event channel %d\n", xi->device_state->pdo_event_channel));
523 xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
524 }
526 //FUNCTION_EXIT();
527 }
529 // called at <= DISPATCH_LEVEL
530 VOID
531 XenNet_SendPackets(
532 IN NDIS_HANDLE MiniportAdapterContext,
533 IN PPNDIS_PACKET PacketArray,
534 IN UINT NumberOfPackets
535 )
536 {
537 struct xennet_info *xi = MiniportAdapterContext;
538 PNDIS_PACKET packet;
539 UINT i;
540 PLIST_ENTRY entry;
541 KIRQL OldIrql;
543 //FUNCTION_ENTER();
545 if (xi->inactive)
546 {
547 for (i = 0; i < NumberOfPackets; i++)
548 {
549 NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
550 }
551 return;
552 }
554 KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
556 for (i = 0; i < NumberOfPackets; i++)
557 {
558 packet = PacketArray[i];
559 ASSERT(packet);
560 *(ULONG *)&packet->MiniportReservedEx = 0;
561 entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
562 InsertTailList(&xi->tx_waiting_pkt_list, entry);
563 }
565 XenNet_SendQueuedPackets(xi);
567 KeReleaseSpinLock(&xi->tx_lock, OldIrql);
569 //FUNCTION_EXIT();
570 }
572 VOID
573 XenNet_CancelSendPackets(
574 NDIS_HANDLE MiniportAdapterContext,
575 PVOID CancelId)
576 {
577 struct xennet_info *xi = MiniportAdapterContext;
578 KIRQL old_irql;
579 PLIST_ENTRY entry;
580 PNDIS_PACKET packet;
581 PNDIS_PACKET head = NULL, tail = NULL;
582 BOOLEAN result;
584 FUNCTION_ENTER();
586 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
588 entry = xi->tx_waiting_pkt_list.Flink;
589 while (entry != &xi->tx_waiting_pkt_list)
590 {
591 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
592 entry = entry->Flink;
593 if (NDIS_GET_PACKET_CANCEL_ID(packet) == CancelId)
594 {
595 KdPrint((__DRIVER_NAME " Found packet to cancel %p\n", packet));
596 result = RemoveEntryList((PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)]);
597 ASSERT(result);
598 *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
599 if (head)
600 *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
601 else
602 head = packet;
603 tail = packet;
604 }
605 }
607 KeReleaseSpinLock(&xi->tx_lock, old_irql);
609 while (head)
610 {
611 packet = (PNDIS_PACKET)head;
612 head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
613 KdPrint((__DRIVER_NAME " NdisMSendComplete(%p)\n", packet));
614 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_REQUEST_ABORTED);
615 }
617 FUNCTION_EXIT();
618 }
620 VOID
621 XenNet_TxResumeStart(xennet_info_t *xi)
622 {
623 UNREFERENCED_PARAMETER(xi);
625 FUNCTION_ENTER();
626 /* nothing to do here - all packets were already sent */
627 FUNCTION_EXIT();
628 }
630 VOID
631 XenNet_TxResumeEnd(xennet_info_t *xi)
632 {
633 KIRQL old_irql;
635 FUNCTION_ENTER();
637 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
638 XenNet_SendQueuedPackets(xi);
639 KeReleaseSpinLock(&xi->tx_lock, old_irql);
641 FUNCTION_EXIT();
642 }
644 BOOLEAN
645 XenNet_TxInit(xennet_info_t *xi)
646 {
647 USHORT i;
649 KeInitializeSpinLock(&xi->tx_lock);
650 InitializeListHead(&xi->tx_waiting_pkt_list);
652 KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
653 xi->tx_shutting_down = FALSE;
654 xi->tx_outstanding = 0;
655 xi->tx_ring_free = NET_TX_RING_SIZE;
657 NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
658 PAGE_SIZE, XENNET_POOL_TAG, 0);
660 xi->tx_id_free = 0;
661 for (i = 0; i < NET_TX_RING_SIZE; i++)
662 {
663 xi->tx_shadows[i].gref = INVALID_GRANT_REF;
664 xi->tx_shadows[i].cb = NULL;
665 put_id_on_freelist(xi, i);
666 }
668 return TRUE;
669 }
671 /*
672 The ring is completely closed down now. We just need to empty anything left
673 on our freelists and harvest anything left on the rings.
674 */
676 BOOLEAN
677 XenNet_TxShutdown(xennet_info_t *xi)
678 {
679 PLIST_ENTRY entry;
680 PNDIS_PACKET packet;
681 //PMDL mdl;
682 //ULONG i;
683 KIRQL OldIrql;
685 FUNCTION_ENTER();
687 KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
688 xi->tx_shutting_down = TRUE;
689 KeReleaseSpinLock(&xi->tx_lock, OldIrql);
691 while (xi->tx_outstanding)
692 {
693 KdPrint((__DRIVER_NAME " Waiting for %d remaining packets to be sent\n", xi->tx_outstanding));
694 KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, NULL);
695 }
697 #if (NTDDI_VERSION >= NTDDI_WINXP)
698 KeFlushQueuedDpcs();
699 #endif
701 /* Free packets in tx queue */
702 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
703 while (entry != &xi->tx_waiting_pkt_list)
704 {
705 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
706 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
707 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
708 }
710 NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);
712 FUNCTION_EXIT();
714 return TRUE;
715 }