win-pvdrivers

view xennet/xennet_tx.c @ 685:c13ccf5a629b

Fixed a bug in the dma routines which was causing memory corruption. In some cases when Windows gave an MDL that was longer than the buffer to be dma'd, the end of the buffer would be overwritten. The only time I am aware of this occurring is on one particular map in Call Of Duty 4.

Split out the dma routines from xenpci_pdo.c into xenpci_dma.c
author James Harper <james.harper@bendigoit.com.au>
date Wed Oct 14 14:46:39 2009 +1100 (2009-10-14)
parents 27ac5655ce9a
children 5bdb7251370c
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 static USHORT
24 get_id_from_freelist(struct xennet_info *xi)
25 {
26 ASSERT(xi->tx_id_free);
27 xi->tx_id_free--;
29 return xi->tx_id_list[xi->tx_id_free];
30 }
32 static VOID
33 put_id_on_freelist(struct xennet_info *xi, USHORT id)
34 {
35 xi->tx_id_list[xi->tx_id_free] = id;
36 xi->tx_id_free++;
37 }
39 static __inline shared_buffer_t *
40 get_cb_from_freelist(struct xennet_info *xi)
41 {
42 shared_buffer_t *cb;
44 //FUNCTION_ENTER();
45 if (xi->tx_cb_free == 0)
46 {
47 //FUNCTION_EXIT();
48 return NULL;
49 }
50 xi->tx_cb_free--;
51 //KdPrint((__DRIVER_NAME " xi->tx_cb_free = %d\n", xi->tx_cb_free));
52 //KdPrint((__DRIVER_NAME " xi->tx_cb_list[xi->tx_cb_free] = %d\n", xi->tx_cb_list[xi->tx_cb_free]));
53 cb = &xi->tx_cbs[xi->tx_cb_list[xi->tx_cb_free]];
54 //KdPrint((__DRIVER_NAME " cb = %p\n", cb));
55 //FUNCTION_EXIT();
56 return cb;
57 }
59 static __inline VOID
60 put_cb_on_freelist(struct xennet_info *xi, shared_buffer_t *cb)
61 {
62 //FUNCTION_ENTER();
64 //KdPrint((__DRIVER_NAME " cb = %p\n", cb));
65 //KdPrint((__DRIVER_NAME " xi->tx_cb_free = %d\n", xi->tx_cb_free));
66 ASSERT(cb);
67 xi->tx_cb_list[xi->tx_cb_free] = cb->id;
68 xi->tx_cb_free++;
69 //FUNCTION_EXIT();
70 }
/* Byte-swap a 16-bit value. The argument is fully parenthesized so that
 * expressions built from low-precedence operators (e.g. SWAP_USHORT(a | b))
 * expand correctly; note that x is still evaluated twice, so avoid
 * arguments with side effects. */
#define SWAP_USHORT(x) ((USHORT)((((x) & 0xFF) << 8) | (((x) >> 8) & 0xFF)))
75 /* Called at DISPATCH_LEVEL with tx_lock held */
76 /*
77 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
78 */
79 static BOOLEAN
80 XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
81 {
82 struct netif_tx_request *tx0 = NULL;
83 struct netif_tx_request *txN = NULL;
84 struct netif_extra_info *ei = NULL;
85 PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
86 //UINT total_packet_length;
87 ULONG mss = 0;
88 uint16_t flags = NETTXF_more_data;
89 packet_info_t pi;
90 BOOLEAN ndis_lso = FALSE;
91 BOOLEAN xen_gso = FALSE;
92 //ULONG remaining;
93 PSCATTER_GATHER_LIST sg = NULL;
94 ULONG sg_element = 0;
95 ULONG sg_offset = 0;
96 ULONG parse_result;
97 shared_buffer_t *coalesce_buf = NULL;
98 ULONG chunks = 0;
100 //FUNCTION_ENTER();
102 XenNet_ClearPacketInfo(&pi);
103 NdisQueryPacket(packet, NULL, (PUINT)&pi.mdl_count, &pi.first_buffer, (PUINT)&pi.total_length);
104 //KdPrint((__DRIVER_NAME " A - packet = %p, mdl_count = %d, total_length = %d\n", packet, pi.mdl_count, pi.total_length));
106 if (xi->config_sg)
107 {
108 parse_result = XenNet_ParsePacketHeader(&pi, NULL, 0);
109 }
110 else
111 {
112 coalesce_buf = get_cb_from_freelist(xi);
113 if (!coalesce_buf)
114 {
115 KdPrint((__DRIVER_NAME " Full on send - no free cb's\n"));
116 return FALSE;
117 }
118 parse_result = XenNet_ParsePacketHeader(&pi, coalesce_buf->virtual, pi.total_length);
119 }
121 //KdPrint((__DRIVER_NAME " B\n"));
123 if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP)
124 {
125 csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
126 packet, TcpIpChecksumPacketInfo);
127 if (csum_info->Transmit.NdisPacketChecksumV4)
128 {
129 if (csum_info->Transmit.NdisPacketIpChecksum && !xi->setting_csum.V4Transmit.IpChecksum)
130 {
131 KdPrint((__DRIVER_NAME " IpChecksum not enabled\n"));
132 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
133 //return TRUE;
134 }
135 if (csum_info->Transmit.NdisPacketTcpChecksum)
136 {
137 if (xi->setting_csum.V4Transmit.TcpChecksum)
138 {
139 flags |= NETTXF_csum_blank | NETTXF_data_validated;
140 }
141 else
142 {
143 KdPrint((__DRIVER_NAME " TcpChecksum not enabled\n"));
144 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
145 //return TRUE;
146 }
147 }
148 else if (csum_info->Transmit.NdisPacketUdpChecksum)
149 {
150 if (xi->setting_csum.V4Transmit.UdpChecksum)
151 {
152 flags |= NETTXF_csum_blank | NETTXF_data_validated;
153 }
154 else
155 {
156 KdPrint((__DRIVER_NAME " UdpChecksum not enabled\n"));
157 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
158 //return TRUE;
159 }
160 }
161 }
162 else if (csum_info->Transmit.NdisPacketChecksumV6)
163 {
164 KdPrint((__DRIVER_NAME " NdisPacketChecksumV6 not supported\n"));
165 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
166 //return TRUE;
167 }
168 }
170 mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
172 if (mss && parse_result == PARSE_OK)
173 {
174 if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) != NDIS_PROTOCOL_ID_TCP_IP)
175 {
176 KdPrint((__DRIVER_NAME " mss specified when packet is not NDIS_PROTOCOL_ID_TCP_IP\n"));
177 }
178 ndis_lso = TRUE;
179 if (mss > xi->setting_max_offload)
180 {
181 KdPrint((__DRIVER_NAME " Requested MSS (%d) larger than allowed MSS (%d)\n", mss, xi->setting_max_offload));
182 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
183 //FUNCTION_EXIT();
184 return TRUE;
185 }
186 }
188 if (xi->config_sg)
189 {
190 sg = (PSCATTER_GATHER_LIST)NDIS_PER_PACKET_INFO_FROM_PACKET(packet, ScatterGatherListPacketInfo);
191 ASSERT(sg != NULL);
193 if (sg->NumberOfElements > 19)
194 {
195 KdPrint((__DRIVER_NAME " sg->NumberOfElements = %d\n", sg->NumberOfElements));
196 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
197 return TRUE; // we'll pretend we sent the packet here for now...
198 }
199 if (sg->NumberOfElements + !!ndis_lso > xi->tx_ring_free)
200 {
201 //KdPrint((__DRIVER_NAME " Full on send - required = %d, available = %d\n", sg->NumberOfElements + !!ndis_lso, xi->tx_ring_free));
202 //FUNCTION_EXIT();
203 return FALSE;
204 }
206 if (ndis_lso || (pi.header_length && pi.header_length > sg->Elements[sg_element].Length && pi.header == pi.header_data))
207 {
208 coalesce_buf = get_cb_from_freelist(xi);
209 if (!coalesce_buf)
210 {
211 KdPrint((__DRIVER_NAME " Full on send - no free cb's\n"));
212 return FALSE;
213 }
214 }
215 }
217 if (ndis_lso)
218 {
219 flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used*/
220 if (pi.tcp_length >= mss)
221 {
222 flags |= NETTXF_extra_info;
223 xen_gso = TRUE;
224 }
225 else
226 {
227 KdPrint((__DRIVER_NAME " large send specified when tcp_length < mss\n"));
228 }
229 }
231 /*
232 * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
233 * (C) rest of requests on the ring. Only (A) has csum flags.
234 */
236 //KdPrint((__DRIVER_NAME " C\n"));
237 /* (A) */
238 // if we coalesced the header then we want to put that on first, otherwise we put on the first sg element
239 tx0 = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
240 chunks++;
241 xi->tx_ring_free--;
242 tx0->id = 0xFFFF;
243 if (coalesce_buf)
244 {
245 ULONG remaining = pi.header_length;
246 //ASSERT(pi.header_length < TX_HEADER_BUFFER_SIZE);
247 //KdPrint((__DRIVER_NAME " D - header_length = %d\n", pi.header_length));
248 memcpy(coalesce_buf->virtual, pi.header, pi.header_length);
249 /* even though we haven't reported that we are capable of it, LSO demands that we calculate the IP Header checksum */
250 if (ndis_lso)
251 {
252 XenNet_SumIpHeader(coalesce_buf->virtual, pi.ip4_header_length);
253 }
254 tx0->gref = (grant_ref_t)(coalesce_buf->logical.QuadPart >> PAGE_SHIFT);
255 tx0->offset = (USHORT)coalesce_buf->logical.LowPart & (PAGE_SIZE - 1);
256 tx0->size = (USHORT)pi.header_length;
257 ASSERT(tx0->offset + tx0->size <= PAGE_SIZE);
258 ASSERT(tx0->size);
259 if (xi->config_sg)
260 {
261 /* TODO: if the next buffer contains only a small amount of data then put it on too */
262 while (remaining)
263 {
264 //KdPrint((__DRIVER_NAME " D - remaining = %d\n", remaining));
265 //KdPrint((__DRIVER_NAME " Da - sg_element = %d, sg->Elements[sg_element].Length = %d\n", sg_element, sg->Elements[sg_element].Length));
266 if (sg->Elements[sg_element].Length <= remaining)
267 {
268 remaining -= sg->Elements[sg_element].Length;
269 sg_element++;
270 }
271 else
272 {
273 sg_offset = remaining;
274 remaining = 0;
275 }
276 }
277 }
278 }
279 else
280 {
281 ASSERT(xi->config_sg);
282 tx0->gref = (grant_ref_t)(sg->Elements[sg_element].Address.QuadPart >> PAGE_SHIFT);
283 tx0->offset = (USHORT)sg->Elements[sg_element].Address.LowPart & (PAGE_SIZE - 1);
284 tx0->size = (USHORT)sg->Elements[sg_element].Length;
285 ASSERT(tx0->size);
286 sg_element++;
287 }
288 tx0->flags = flags;
289 txN = tx0;
290 xi->tx.req_prod_pvt++;
292 /* (B) */
293 if (xen_gso)
294 {
295 //KdPrint((__DRIVER_NAME " Using extra_info\n"));
296 ASSERT(flags & NETTXF_extra_info);
297 ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
298 xi->tx_ring_free--;
299 ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
300 ei->flags = 0;
301 ei->u.gso.size = (USHORT)mss;
302 ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
303 ei->u.gso.pad = 0;
304 ei->u.gso.features = 0;
305 xi->tx.req_prod_pvt++;
306 }
308 //KdPrint((__DRIVER_NAME " F\n"));
309 if (xi->config_sg)
310 {
311 /* (C) - only if sg otherwise it was all sent on the first buffer */
312 while (sg_element < sg->NumberOfElements)
313 {
314 //KdPrint((__DRIVER_NAME " G - sg_element = %d, sg_offset = %d\n", sg_element, sg_offset));
315 //KdPrint((__DRIVER_NAME " H - address = %p, length = %d\n",
316 // sg->Elements[sg_element].Address.LowPart + sg_offset, sg->Elements[sg_element].Length - sg_offset));
317 txN = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
318 chunks++;
319 xi->tx_ring_free--;
320 txN->id = 0xFFFF;
321 txN->gref = (grant_ref_t)(sg->Elements[sg_element].Address.QuadPart >> PAGE_SHIFT);
322 ASSERT((sg->Elements[sg_element].Address.LowPart & (PAGE_SIZE - 1)) + sg_offset <= PAGE_SIZE);
323 txN->offset = (USHORT)(sg->Elements[sg_element].Address.LowPart + sg_offset) & (PAGE_SIZE - 1);
324 ASSERT(sg->Elements[sg_element].Length > sg_offset);
325 txN->size = (USHORT)(sg->Elements[sg_element].Length - sg_offset);
326 ASSERT(txN->offset + txN->size <= PAGE_SIZE);
327 ASSERT(txN->size);
328 tx0->size = tx0->size + txN->size;
329 txN->flags = NETTXF_more_data;
330 sg_offset = 0;
331 sg_element++;
332 xi->tx.req_prod_pvt++;
333 }
334 }
335 txN->flags &= ~NETTXF_more_data;
336 txN->id = get_id_from_freelist(xi);
337 //KdPrint((__DRIVER_NAME " send - id = %d\n", tx0->id));
338 //KdPrint((__DRIVER_NAME " TX: id = %d, cb = %p, xi->tx_shadows[txN->id].cb = %p\n", txN->id, coalesce_buf, xi->tx_shadows[txN->id].cb));
339 ASSERT(tx0->size == pi.total_length);
340 ASSERT(!xi->tx_shadows[txN->id].cb);
341 ASSERT(!xi->tx_shadows[txN->id].packet);
342 xi->tx_shadows[txN->id].packet = packet;
343 xi->tx_shadows[txN->id].cb = coalesce_buf;
345 if (ndis_lso)
346 {
347 //KdPrint((__DRIVER_NAME " TcpLargeSendPacketInfo = %d\n", pi.tcp_length));
348 NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(pi.tcp_length);
349 }
351 if (chunks > 19)
352 {
353 KdPrint((__DRIVER_NAME " chunks = %d\n", chunks));
354 }
355 xi->stat_tx_ok++;
357 //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
358 //FUNCTION_EXIT();
359 xi->tx_outstanding++;
360 return TRUE;
361 }
363 /* Called at DISPATCH_LEVEL with tx_lock held */
365 static VOID
366 XenNet_SendQueuedPackets(struct xennet_info *xi)
367 {
368 PLIST_ENTRY entry;
369 PNDIS_PACKET packet;
370 int notify;
372 //FUNCTION_ENTER();
374 if (xi->device_state->suspend_resume_state_pdo != SR_STATE_RUNNING)
375 return;
377 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
378 /* if empty, the above returns head*, not NULL */
379 while (entry != &xi->tx_waiting_pkt_list)
380 {
381 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
382 //KdPrint((__DRIVER_NAME " Packet ready to send\n"));
383 if (!XenNet_HWSendPacket(xi, packet))
384 {
385 InsertHeadList(&xi->tx_waiting_pkt_list, entry);
386 break;
387 }
388 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
389 }
391 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
392 if (notify)
393 {
394 xi->vectors.EvtChn_Notify(xi->vectors.context, xi->event_channel);
395 }
396 //FUNCTION_EXIT();
397 }
// Called at DISPATCH_LEVEL
/*
 * Tx completion DPC: harvest responses from the shared tx ring, recycle
 * ring ids and coalesce buffers, complete finished packets to NDIS, and
 * restart any packets that were queued while the ring was full.
 */
VOID
XenNet_TxBufferGC(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
{
  struct xennet_info *xi = context;
  RING_IDX cons, prod;
  PNDIS_PACKET head = NULL, tail = NULL; /* chain of packets to complete after dropping the lock */
  PNDIS_PACKET packet;

  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  ASSERT(xi->connected);
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->tx_shutting_down && !xi->tx_outstanding)
  {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }

  /* loop again if the backend produced more responses while we were consuming */
  do {
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      txrsp = RING_GET_RESPONSE(&xi->tx, cons);

      xi->tx_ring_free++;

      /* id 0xFFFF marks a chained request slot with no shadow entry of its
         own; only the final request of a packet carries a real id */
      if (txrsp->status == NETIF_RSP_NULL || txrsp->id == 0xFFFF)
        continue;

      if (xi->tx_shadows[txrsp->id].cb)
      {
        put_cb_on_freelist(xi, xi->tx_shadows[txrsp->id].cb);
        xi->tx_shadows[txrsp->id].cb = NULL;
      }

      if (xi->tx_shadows[txrsp->id].packet)
      {
        /* link completed packets through MiniportReservedEx[0] so they can
           be handed to NdisMSendComplete after the lock is released */
        packet = xi->tx_shadows[txrsp->id].packet;
        *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
        if (head)
          *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
        else
          head = packet;
        tail = packet;
        xi->tx_shadows[txrsp->id].packet = NULL;
      }
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx.rsp_cons = prod;
    /* ask the backend for an event once another quarter-ring is consumed */
    xi->tx.sring->rsp_event = prod + (NET_TX_RING_SIZE >> 2);
    KeMemoryBarrier();
  } while (prod != xi->tx.sring->rsp_prod);

  /* if queued packets, send them now */
  if (!xi->tx_shutting_down)
    XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* complete the harvested packets to NDIS outside tx_lock */
  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
    /* NOTE(review): tx_outstanding is modified here without tx_lock held -
       presumably safe because this dpc is bound to a single cpu (see
       XenNet_TxInit), but confirm no other path decrements it concurrently */
    xi->tx_outstanding--;
    if (!xi->tx_outstanding && xi->tx_shutting_down)
      KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  }

  /* signal the pdo that tx is fully idle so a pending suspend can proceed */
  if (xi->device_state->suspend_resume_state_pdo == SR_STATE_SUSPENDING
    && xi->device_state->suspend_resume_state_fdo != SR_STATE_SUSPENDING
    && xi->tx_id_free == NET_TX_RING_SIZE)
  {
    KdPrint((__DRIVER_NAME " Setting SR_STATE_SUSPENDING\n"));
    xi->device_state->suspend_resume_state_fdo = SR_STATE_SUSPENDING;
    KdPrint((__DRIVER_NAME " Notifying event channel %d\n", xi->device_state->pdo_event_channel));
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
  }
}
497 // called at <= DISPATCH_LEVEL
498 VOID DDKAPI
499 XenNet_SendPackets(
500 IN NDIS_HANDLE MiniportAdapterContext,
501 IN PPNDIS_PACKET PacketArray,
502 IN UINT NumberOfPackets
503 )
504 {
505 struct xennet_info *xi = MiniportAdapterContext;
506 PNDIS_PACKET packet;
507 UINT i;
508 PLIST_ENTRY entry;
509 KIRQL OldIrql;
511 //FUNCTION_ENTER();
513 if (xi->inactive)
514 {
515 for (i = 0; i < NumberOfPackets; i++)
516 {
517 NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
518 }
519 return;
520 }
522 KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
524 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ " (packets = %d, free_requests = %d)\n", NumberOfPackets, free_requests(xi)));
525 for (i = 0; i < NumberOfPackets; i++)
526 {
527 packet = PacketArray[i];
528 //packets_outstanding++;
529 //KdPrint(("+packet = %p\n", packet));
530 ASSERT(packet);
531 *(ULONG *)&packet->MiniportReservedEx = 0;
532 entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
533 InsertTailList(&xi->tx_waiting_pkt_list, entry);
534 }
536 XenNet_SendQueuedPackets(xi);
538 KeReleaseSpinLock(&xi->tx_lock, OldIrql);
540 //FUNCTION_EXIT();
541 }
543 VOID
544 XenNet_CancelSendPackets(
545 NDIS_HANDLE MiniportAdapterContext,
546 PVOID CancelId)
547 {
548 struct xennet_info *xi = MiniportAdapterContext;
549 KIRQL old_irql;
550 PLIST_ENTRY entry;
551 PNDIS_PACKET packet;
552 PNDIS_PACKET head = NULL, tail = NULL;
554 FUNCTION_ENTER();
556 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
558 entry = xi->tx_waiting_pkt_list.Flink;
559 while (entry != &xi->tx_waiting_pkt_list)
560 {
561 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
562 entry = entry->Flink;
563 if (NDIS_GET_PACKET_CANCEL_ID(packet) == CancelId)
564 {
565 RemoveEntryList((PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)]);
566 *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
567 if (head)
568 *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
569 else
570 head = packet;
571 tail = packet;
572 }
573 }
575 KeReleaseSpinLock(&xi->tx_lock, old_irql);
577 while (head)
578 {
579 packet = (PNDIS_PACKET)head;
580 head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
581 NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_REQUEST_ABORTED);
582 }
584 FUNCTION_EXIT();
585 }
587 VOID
588 XenNet_TxResumeStart(xennet_info_t *xi)
589 {
590 UNREFERENCED_PARAMETER(xi);
592 FUNCTION_ENTER();
593 /* nothing to do here - all packets were already sent */
594 FUNCTION_EXIT();
595 }
597 VOID
598 XenNet_TxResumeEnd(xennet_info_t *xi)
599 {
600 KIRQL old_irql;
602 FUNCTION_ENTER();
604 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
605 XenNet_SendQueuedPackets(xi);
606 KeReleaseSpinLock(&xi->tx_lock, old_irql);
608 FUNCTION_EXIT();
609 }
611 BOOLEAN
612 XenNet_TxInit(xennet_info_t *xi)
613 {
614 USHORT i, j;
615 ULONG cb_size;
617 KeInitializeSpinLock(&xi->tx_lock);
618 KeInitializeDpc(&xi->tx_dpc, XenNet_TxBufferGC, xi);
619 /* dpcs are only serialised to a single processor */
620 KeSetTargetProcessorDpc(&xi->tx_dpc, 0);
621 //KeSetImportanceDpc(&xi->tx_dpc, HighImportance);
622 InitializeListHead(&xi->tx_waiting_pkt_list);
624 KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
625 xi->tx_shutting_down = FALSE;
626 xi->tx_outstanding = 0;
627 xi->tx_ring_free = NET_TX_RING_SIZE;
629 if (xi->config_sg)
630 {
631 cb_size = TX_HEADER_BUFFER_SIZE;
632 }
633 else
634 {
635 cb_size = PAGE_SIZE;
636 }
638 for (i = 0; i < TX_COALESCE_BUFFERS / (PAGE_SIZE / cb_size); i++)
639 {
640 PVOID virtual;
641 NDIS_PHYSICAL_ADDRESS logical;
642 NdisMAllocateSharedMemory(xi->adapter_handle, PAGE_SIZE, TRUE, &virtual, &logical);
643 if (virtual == NULL)
644 break;
645 for (j = 0; j < PAGE_SIZE / cb_size; j++)
646 {
647 USHORT index = (USHORT)(i * (PAGE_SIZE / cb_size) + j);
648 xi->tx_cbs[index].id = index;
649 xi->tx_cbs[index].virtual = (PUCHAR)virtual + j * cb_size;
650 xi->tx_cbs[index].logical.QuadPart = logical.QuadPart + j * cb_size;
651 put_cb_on_freelist(xi, &xi->tx_cbs[index]);
652 }
653 }
654 if (i == 0)
655 KdPrint((__DRIVER_NAME " Unable to allocate any SharedMemory buffers\n"));
657 xi->tx_id_free = 0;
658 for (i = 0; i < NET_TX_RING_SIZE; i++)
659 {
660 put_id_on_freelist(xi, i);
661 }
663 return TRUE;
664 }
/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  KIRQL OldIrql;
  shared_buffer_t *cb;

  FUNCTION_ENTER();

  /* stop XenNet_SendQueuedPackets / the GC dpc from issuing new sends */
  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
  xi->tx_shutting_down = TRUE;
  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  /* wait for every in-flight packet to be completed; tx_idle_event is set
     by XenNet_TxBufferGC when tx_outstanding reaches zero */
  while (xi->tx_outstanding)
  {
    KdPrint((__DRIVER_NAME " Waiting for %d remaining packets to be sent\n", xi->tx_outstanding));
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, NULL);
  }

  /* make sure the GC dpc is neither queued nor still running */
  KeRemoveQueueDpc(&xi->tx_dpc);
  KeFlushQueuedDpcs();

  /* Free packets in tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  /* drain the coalesce-buffer freelist; sub-buffers share a page with their
     page-aligned sibling (see XenNet_TxInit), so... */
  while((cb = get_cb_from_freelist(xi)) != NULL)
  {
    /* only free the actual buffers which were aligned on a page boundary */
    if ((PtrToUlong(cb->virtual) & (PAGE_SIZE - 1)) == 0)
      NdisMFreeSharedMemory(xi->adapter_handle, PAGE_SIZE, TRUE, cb->virtual, cb->logical);
  }

  FUNCTION_EXIT();

  return TRUE;
}