win-pvdrivers

xennet/xennet_tx.c @ 639:16108e228997

changeset: Added tag 0.10.0.89 for changeset 70c3a7839b4e
author:    James Harper <james.harper@bendigoit.com.au>
date:      Sun Aug 23 14:19:50 2009 +1000 (2009-08-23)
parents:   ca22d5c09eed
children:  27ac5655ce9a
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"
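
/* The tx id freelist is a simple stack: tx_id_free is both the number of free
 * ids and the index of the next free slot in tx_id_list. Each id indexes
 * tx_shadows[], which records the NDIS packet and coalesce buffer associated
 * with an outstanding request. Callers are expected to hold tx_lock. */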
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}
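
/* The coalesce buffer (cb) freelist works the same way: tx_cb_free is the
 * stack depth and tx_cb_list holds the indices of free entries in tx_cbs[].
 * get_cb_from_freelist() returns NULL when no buffer is free, which the send
 * path treats as "full on send" and reports by returning FALSE. */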
static __inline shared_buffer_t *
get_cb_from_freelist(struct xennet_info *xi)
{
  shared_buffer_t *cb;

  //FUNCTION_ENTER();
  if (xi->tx_cb_free == 0)
  {
    //FUNCTION_EXIT();
    return NULL;
  }
  xi->tx_cb_free--;
  //KdPrint((__DRIVER_NAME " xi->tx_cb_free = %d\n", xi->tx_cb_free));
  //KdPrint((__DRIVER_NAME " xi->tx_cb_list[xi->tx_cb_free] = %d\n", xi->tx_cb_list[xi->tx_cb_free]));
  cb = &xi->tx_cbs[xi->tx_cb_list[xi->tx_cb_free]];
  //KdPrint((__DRIVER_NAME " cb = %p\n", cb));
  //FUNCTION_EXIT();
  return cb;
}

static __inline VOID
put_cb_on_freelist(struct xennet_info *xi, shared_buffer_t *cb)
{
  //FUNCTION_ENTER();

  //KdPrint((__DRIVER_NAME " cb = %p\n", cb));
  //KdPrint((__DRIVER_NAME " xi->tx_cb_free = %d\n", xi->tx_cb_free));
  ASSERT(cb);
  xi->tx_cb_list[xi->tx_cb_free] = cb->id;
  xi->tx_cb_free++;
  //FUNCTION_EXIT();
}
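
/* Swap the two bytes of a USHORT. */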
#define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))

/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
 */
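/*
 * Returns FALSE if the packet cannot be queued because the ring or the
 * coalesce buffer freelist is full (the caller re-queues it and retries
 * later); returns TRUE once the packet has been consumed.
 */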
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
{
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  //UINT total_packet_length;
  ULONG mss = 0;
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  //ULONG remaining;
  PSCATTER_GATHER_LIST sg = NULL;
  ULONG sg_element = 0;
  ULONG sg_offset = 0;
  ULONG parse_result;
  shared_buffer_t *coalesce_buf = NULL;
  ULONG chunks = 0;

  //FUNCTION_ENTER();

  XenNet_ClearPacketInfo(&pi);
  NdisQueryPacket(packet, NULL, (PUINT)&pi.mdl_count, &pi.first_buffer, (PUINT)&pi.total_length);
  //KdPrint((__DRIVER_NAME " A - packet = %p, mdl_count = %d, total_length = %d\n", packet, pi.mdl_count, pi.total_length));

  if (xi->config_sg)
  {
    parse_result = XenNet_ParsePacketHeader(&pi, NULL, 0);
  }
  else
  {
    coalesce_buf = get_cb_from_freelist(xi);
    if (!coalesce_buf)
    {
      KdPrint((__DRIVER_NAME " Full on send - no free cb's\n"));
      return FALSE;
    }
    parse_result = XenNet_ParsePacketHeader(&pi, coalesce_buf->virtual, pi.total_length);
  }

  //KdPrint((__DRIVER_NAME " B\n"));

  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP)
  {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4)
    {
      if (csum_info->Transmit.NdisPacketIpChecksum && !xi->setting_csum.V4Transmit.IpChecksum)
      {
        KdPrint((__DRIVER_NAME " IpChecksum not enabled\n"));
        //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
        //return TRUE;
      }
      if (csum_info->Transmit.NdisPacketTcpChecksum)
      {
        if (xi->setting_csum.V4Transmit.TcpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " TcpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
      else if (csum_info->Transmit.NdisPacketUdpChecksum)
      {
        if (xi->setting_csum.V4Transmit.UdpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " UdpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
    }
    else if (csum_info->Transmit.NdisPacketChecksumV6)
    {
      KdPrint((__DRIVER_NAME " NdisPacketChecksumV6 not supported\n"));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //return TRUE;
    }
  }

  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));

  if (mss)
  {
    if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) != NDIS_PROTOCOL_ID_TCP_IP)
    {
      KdPrint((__DRIVER_NAME " mss specified when packet is not NDIS_PROTOCOL_ID_TCP_IP\n"));
    }
    ndis_lso = TRUE;
    if (mss > xi->setting_max_offload)
    {
      KdPrint((__DRIVER_NAME " Requested MSS (%d) larger than allowed MSS (%d)\n", mss, xi->setting_max_offload));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //FUNCTION_EXIT();
      return TRUE;
    }
  }

  if (xi->config_sg)
  {
    sg = (PSCATTER_GATHER_LIST)NDIS_PER_PACKET_INFO_FROM_PACKET(packet, ScatterGatherListPacketInfo);
    ASSERT(sg != NULL);

    if (sg->NumberOfElements > 19)
    {
      KdPrint((__DRIVER_NAME " sg->NumberOfElements = %d\n", sg->NumberOfElements));
      NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
      return TRUE; // we'll pretend we sent the packet here for now...
    }
    if (sg->NumberOfElements + !!ndis_lso > xi->tx_ring_free)
    {
      //KdPrint((__DRIVER_NAME " Full on send - required = %d, available = %d\n", sg->NumberOfElements + !!ndis_lso, xi->tx_ring_free));
      //FUNCTION_EXIT();
      return FALSE;
    }

    if (ndis_lso || (pi.header_length && pi.header_length > sg->Elements[sg_element].Length && pi.header == pi.header_data))
    {
      coalesce_buf = get_cb_from_freelist(xi);
      if (!coalesce_buf)
      {
        KdPrint((__DRIVER_NAME " Full on send - no free cb's\n"));
        return FALSE;
      }
    }
  }

  if (ndis_lso)
  {
    if (parse_result == PARSE_OK)
    {
      flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
      if (pi.tcp_length >= mss)
      {
        flags |= NETTXF_extra_info;
        xen_gso = TRUE;
      }
      else
      {
        KdPrint((__DRIVER_NAME " large send specified when tcp_length < mss\n"));
      }
    }
    else
    {
      KdPrint((__DRIVER_NAME " could not parse packet - no large send offload done\n"));
    }
  }

  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */

  //KdPrint((__DRIVER_NAME " C\n"));
  /* (A) */
  // if we coalesced the header then we want to put that on first, otherwise we put on the first sg element
  tx0 = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
  chunks++;
  xi->tx_ring_free--;
  tx0->id = 0xFFFF;
  if (coalesce_buf)
  {
    ULONG remaining = pi.header_length;
    //ASSERT(pi.header_length < TX_HEADER_BUFFER_SIZE);
    //KdPrint((__DRIVER_NAME " D - header_length = %d\n", pi.header_length));
    memcpy(coalesce_buf->virtual, pi.header, pi.header_length);
    /* even though we haven't reported that we are capable of it, LSO demands that we calculate the IP Header checksum */
    if (ndis_lso)
    {
      XenNet_SumIpHeader(coalesce_buf->virtual, pi.ip4_header_length);
    }
    tx0->gref = (grant_ref_t)(coalesce_buf->logical.QuadPart >> PAGE_SHIFT);
    tx0->offset = (USHORT)coalesce_buf->logical.LowPart & (PAGE_SIZE - 1);
    tx0->size = (USHORT)pi.header_length;
    ASSERT(tx0->offset + tx0->size <= PAGE_SIZE);
    ASSERT(tx0->size);
    if (xi->config_sg)
    {
      /* TODO: if the next buffer contains only a small amount of data then put it on too */
      while (remaining)
      {
        //KdPrint((__DRIVER_NAME " D - remaining = %d\n", remaining));
        //KdPrint((__DRIVER_NAME " Da - sg_element = %d, sg->Elements[sg_element].Length = %d\n", sg_element, sg->Elements[sg_element].Length));
        if (sg->Elements[sg_element].Length <= remaining)
        {
          remaining -= sg->Elements[sg_element].Length;
          sg_element++;
        }
        else
        {
          sg_offset = remaining;
          remaining = 0;
        }
      }
    }
  }
  else
  {
    ASSERT(xi->config_sg);
    tx0->gref = (grant_ref_t)(sg->Elements[sg_element].Address.QuadPart >> PAGE_SHIFT);
    tx0->offset = (USHORT)sg->Elements[sg_element].Address.LowPart & (PAGE_SIZE - 1);
    tx0->size = (USHORT)sg->Elements[sg_element].Length;
    ASSERT(tx0->size);
    sg_element++;
  }
  tx0->flags = flags;
  txN = tx0;
  xi->tx.req_prod_pvt++;

  /* (B) */
  if (xen_gso)
  {
    //KdPrint((__DRIVER_NAME " Using extra_info\n"));
    ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
    xi->tx.req_prod_pvt++;
  }

  //KdPrint((__DRIVER_NAME " F\n"));
  if (xi->config_sg)
  {
    /* (C) - only if sg otherwise it was all sent on the first buffer */
    while (sg_element < sg->NumberOfElements)
    {
      //KdPrint((__DRIVER_NAME " G - sg_element = %d, sg_offset = %d\n", sg_element, sg_offset));
      //KdPrint((__DRIVER_NAME " H - address = %p, length = %d\n",
      //  sg->Elements[sg_element].Address.LowPart + sg_offset, sg->Elements[sg_element].Length - sg_offset));
      txN = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
      chunks++;
      xi->tx_ring_free--;
      txN->id = 0xFFFF;
      txN->gref = (grant_ref_t)(sg->Elements[sg_element].Address.QuadPart >> PAGE_SHIFT);
      ASSERT((sg->Elements[sg_element].Address.LowPart & (PAGE_SIZE - 1)) + sg_offset <= PAGE_SIZE);
      txN->offset = (USHORT)(sg->Elements[sg_element].Address.LowPart + sg_offset) & (PAGE_SIZE - 1);
      ASSERT(sg->Elements[sg_element].Length > sg_offset);
      txN->size = (USHORT)(sg->Elements[sg_element].Length - sg_offset);
      ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      ASSERT(txN->size);
      tx0->size = tx0->size + txN->size;
      txN->flags = NETTXF_more_data;
      sg_offset = 0;
      sg_element++;
      xi->tx.req_prod_pvt++;
    }
  }
  txN->flags &= ~NETTXF_more_data;
  txN->id = get_id_from_freelist(xi);
  //KdPrint((__DRIVER_NAME " send - id = %d\n", tx0->id));
  //KdPrint((__DRIVER_NAME " TX: id = %d, cb = %p, xi->tx_shadows[txN->id].cb = %p\n", txN->id, coalesce_buf, xi->tx_shadows[txN->id].cb));
  ASSERT(tx0->size == pi.total_length);
  ASSERT(!xi->tx_shadows[txN->id].cb);
  ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;
  xi->tx_shadows[txN->id].cb = coalesce_buf;

  if (ndis_lso)
  {
    //KdPrint((__DRIVER_NAME " TcpLargeSendPacketInfo = %d\n", pi.tcp_length));
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(pi.tcp_length);
  }

  if (chunks > 19)
  {
    KdPrint((__DRIVER_NAME " chunks = %d\n", chunks));
  }
  xi->stat_tx_ok++;

  //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  //FUNCTION_EXIT();
  xi->tx_outstanding++;
  return TRUE;
}

/* Called at DISPATCH_LEVEL with tx_lock held */

static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  int notify;

  //FUNCTION_ENTER();

  if (xi->device_state->suspend_resume_state_pdo != SR_STATE_RUNNING)
    return;

  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  /* if empty, the above returns head*, not NULL */
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    //KdPrint((__DRIVER_NAME " Packet ready to send\n"));
    if (!XenNet_HWSendPacket(xi, packet))
    {
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
  if (notify)
  {
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->event_channel);
  }
  //FUNCTION_EXIT();
}

//ULONG packets_outstanding = 0;
// Called at DISPATCH_LEVEL
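// This is the tx DPC: it harvests completed responses from the tx ring,
// returns ids and coalesce buffers to their freelists, completes the
// corresponding NDIS packets, and then attempts to send any queued packets.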
VOID
XenNet_TxBufferGC(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
{
  struct xennet_info *xi = context;
  RING_IDX cons, prod;
  PNDIS_PACKET head = NULL, tail = NULL;
  PNDIS_PACKET packet;

  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  //FUNCTION_ENTER();

  ASSERT(xi->connected);
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->tx_shutting_down && !xi->tx_outstanding)
  {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }

  do {
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      txrsp = RING_GET_RESPONSE(&xi->tx, cons);

      xi->tx_ring_free++;

      if (txrsp->status == NETIF_RSP_NULL || txrsp->id == 0xFFFF)
        continue;

      //KdPrint((__DRIVER_NAME " GC: id = %d, cb = %p\n", txrsp->id, xi->tx_shadows[txrsp->id].cb));
      if (xi->tx_shadows[txrsp->id].cb)
      {
        put_cb_on_freelist(xi, xi->tx_shadows[txrsp->id].cb);
        xi->tx_shadows[txrsp->id].cb = NULL;
      }

      if (xi->tx_shadows[txrsp->id].packet)
      {
        packet = xi->tx_shadows[txrsp->id].packet;
        *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
        if (head)
          *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
        else
          head = packet;
        tail = packet;
        xi->tx_shadows[txrsp->id].packet = NULL;
      }
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx.rsp_cons = prod;
    xi->tx.sring->rsp_event = prod + (NET_TX_RING_SIZE >> 2);
    KeMemoryBarrier();
  } while (prod != xi->tx.sring->rsp_prod);

  /* if queued packets, send them now */
  if (!xi->tx_shutting_down)
    XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
    xi->tx_outstanding--;
    if (!xi->tx_outstanding && xi->tx_shutting_down)
      KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  }

  if (xi->device_state->suspend_resume_state_pdo == SR_STATE_SUSPENDING
    && xi->device_state->suspend_resume_state_fdo != SR_STATE_SUSPENDING
    && xi->tx_id_free == NET_TX_RING_SIZE)
  {
    KdPrint((__DRIVER_NAME " Setting SR_STATE_SUSPENDING\n"));
    xi->device_state->suspend_resume_state_fdo = SR_STATE_SUSPENDING;
    KdPrint((__DRIVER_NAME " Notifying event channel %d\n", xi->device_state->pdo_event_channel));
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
  }

  //FUNCTION_EXIT();
}

// called at <= DISPATCH_LEVEL
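// Packets are linked into tx_waiting_pkt_list through the MiniportReservedEx
// area of each NDIS_PACKET and then sent, as far as ring space allows, while
// holding the tx lock.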
VOID DDKAPI
XenNet_SendPackets(
  IN NDIS_HANDLE MiniportAdapterContext,
  IN PPNDIS_PACKET PacketArray,
  IN UINT NumberOfPackets
  )
{
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL OldIrql;

  //FUNCTION_ENTER();

  if (xi->inactive)
  {
    for (i = 0; i < NumberOfPackets; i++)
    {
      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
    }
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ " (packets = %d, free_requests = %d)\n", NumberOfPackets, free_requests(xi)));
  for (i = 0; i < NumberOfPackets; i++)
  {
    packet = PacketArray[i];
    //packets_outstanding++;
    //KdPrint(("+packet = %p\n", packet));
    ASSERT(packet);
    *(ULONG *)&packet->MiniportReservedEx = 0;
    entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  //FUNCTION_EXIT();
}
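
/* Remove any still-queued packets whose cancel id matches CancelId and
   complete them with NDIS_STATUS_REQUEST_ABORTED. */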
VOID
XenNet_CancelSendPackets(
  NDIS_HANDLE MiniportAdapterContext,
  PVOID CancelId)
{
  struct xennet_info *xi = MiniportAdapterContext;
  KIRQL old_irql;
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  PNDIS_PACKET head = NULL, tail = NULL;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  entry = xi->tx_waiting_pkt_list.Flink;
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    entry = entry->Flink;
    if (NDIS_GET_PACKET_CANCEL_ID(packet) == CancelId)
    {
      RemoveEntryList((PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)]);
      *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
      if (head)
        *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
      else
        head = packet;
      tail = packet;
    }
  }

  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_REQUEST_ABORTED);
  }

  FUNCTION_EXIT();
}

VOID
XenNet_TxResumeStart(xennet_info_t *xi)
{
  UNREFERENCED_PARAMETER(xi);

  FUNCTION_ENTER();
  /* nothing to do here - all packets were already sent */
  FUNCTION_EXIT();
}

VOID
XenNet_TxResumeEnd(xennet_info_t *xi)
{
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  XenNet_SendQueuedPackets(xi);
  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  FUNCTION_EXIT();
}
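
/* Set up the tx lock, DPC, waiting-packet list and freelists, and allocate the
   shared coalesce buffers used when a header (or, with scatter/gather disabled,
   a whole packet) has to be copied into one contiguous buffer. */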
BOOLEAN
XenNet_TxInit(xennet_info_t *xi)
{
  USHORT i, j;
  ULONG cb_size;

  KeInitializeSpinLock(&xi->tx_lock);
  KeInitializeDpc(&xi->tx_dpc, XenNet_TxBufferGC, xi);
  /* dpcs are only serialised to a single processor */
  KeSetTargetProcessorDpc(&xi->tx_dpc, 0);
  //KeSetImportanceDpc(&xi->tx_dpc, HighImportance);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_shutting_down = FALSE;
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;

  if (xi->config_sg)
  {
    cb_size = TX_HEADER_BUFFER_SIZE;
  }
  else
  {
    cb_size = PAGE_SIZE;
  }
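
  /* Carve each shared page into PAGE_SIZE / cb_size coalesce buffers and put
     them all on the cb freelist. */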
  for (i = 0; i < TX_COALESCE_BUFFERS / (PAGE_SIZE / cb_size); i++)
  {
    PVOID virtual;
    NDIS_PHYSICAL_ADDRESS logical;
    NdisMAllocateSharedMemory(xi->adapter_handle, PAGE_SIZE, TRUE, &virtual, &logical);
    if (virtual == NULL)
      break;
    for (j = 0; j < PAGE_SIZE / cb_size; j++)
    {
      USHORT index = (USHORT)(i * (PAGE_SIZE / cb_size) + j);
      xi->tx_cbs[index].id = index;
      xi->tx_cbs[index].virtual = (PUCHAR)virtual + j * cb_size;
      xi->tx_cbs[index].logical.QuadPart = logical.QuadPart + j * cb_size;
      put_cb_on_freelist(xi, &xi->tx_cbs[index]);
    }
  }
  if (i == 0)
    KdPrint((__DRIVER_NAME " Unable to allocate any SharedMemory buffers\n"));

  xi->tx_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}

/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  //PMDL mdl;
  //ULONG i;
  KIRQL OldIrql;
  shared_buffer_t *cb;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
  xi->tx_shutting_down = TRUE;
  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  while (xi->tx_outstanding)
  {
    KdPrint((__DRIVER_NAME " Waiting for %d remaining packets to be sent\n", xi->tx_outstanding));
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, NULL);
  }

  KeRemoveQueueDpc(&xi->tx_dpc);
  KeFlushQueuedDpcs();

  /* Free packets in tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  //KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  while ((cb = get_cb_from_freelist(xi)) != NULL)
  {
    /* only free the actual buffers which were aligned on a page boundary */
    if ((PtrToUlong(cb->virtual) & (PAGE_SIZE - 1)) == 0)
      NdisMFreeSharedMemory(xi->adapter_handle, PAGE_SIZE, TRUE, cb->virtual, cb->logical);
  }

  //KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  FUNCTION_EXIT();

  return TRUE;
}