win-pvdrivers

xennet/xennet_tx.c @ 1007:4cda50fe71d5

merge ndis5 and ndis6 code in xennet

author    James Harper <james.harper@bendigoit.com.au>
date      Sun Feb 10 23:14:05 2013 +1100
parents   3c7778b9f877
children  c21a4feb4a27
/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet.h"
static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  NT_ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}
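/*
 * The id free list is a LIFO stack: tx_id_free counts the free ids and
 * tx_id_list[0 .. tx_id_free - 1] holds them. Each id doubles as an index
 * into tx_shadows[], which keeps the coalesce buffer, grant ref and packet
 * for that request until the backend's response comes back. Illustrative
 * only:
 *
 *   put_id_on_freelist(xi, 7);        // tx_id_list[n] = 7, n++
 *   id = get_id_from_freelist(xi);    // --n, returns tx_id_list[n] == 7
 *   xi->tx_shadows[id].cb = buf;      // stash per-request state
 */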
#define SWAP_USHORT(x) (USHORT)((((x & 0xFF) << 8)|((x >> 8) & 0xFF)))
static __forceinline struct netif_tx_request *
XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
{
  struct netif_tx_request *tx;
  tx = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
  xi->tx_ring.req_prod_pvt++;
  xi->tx_ring_free--;
  tx->id = get_id_from_freelist(xi);
  NT_ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
  NT_ASSERT(!xi->tx_shadows[tx->id].cb);
  xi->tx_shadows[tx->id].cb = coalesce_buf;
  tx->gref = XnGrantAccess(xi->handle, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
  xi->tx_shadows[tx->id].gref = tx->gref;
  tx->offset = 0;
  tx->size = (USHORT)length;
  NT_ASSERT(tx->offset + tx->size <= PAGE_SIZE);
  NT_ASSERT(tx->size);
  return tx;
}
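/*
 * Each slot taken from the ring pairs a pre-allocated grant ref with the
 * PFN of the coalesce buffer so the backend can map the page for the
 * lifetime of the request. A rough sketch of the calling pattern used in
 * this file:
 *
 *   gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');  // reserve first
 *   if (gref != INVALID_GRANT_REF)
 *     tx = XenNet_PutCbOnRing(xi, buf, len, gref);      // consumes a slot
 *   // on response: XnEndAccess(...), then put_id_on_freelist(...)
 */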
#if NTDDI_VERSION < NTDDI_VISTA
/* NDIS_PACKET and NdisQueryPacket only exist pre-NDIS6, so guard this debug helper */
static VOID dump_packet_data(PNDIS_PACKET packet, PCHAR header) {
  UINT mdl_count;
  PMDL first_mdl;
  UINT total_length;

  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &first_mdl, (PUINT)&total_length);
  FUNCTION_MSG("%s mdl_count = %d, first_mdl = %p, total_length = %d\n", header, mdl_count, first_mdl, total_length);
}
#endif
/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NDIS_PACKET (NDIS5) or NET_BUFFER (NDIS6). This may involve
 * multiple entries on TX ring.
 */
static BOOLEAN
#if NTDDI_VERSION < NTDDI_VISTA
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
#else
XenNet_HWSendPacket(struct xennet_info *xi, PNET_BUFFER packet)
#endif
{
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  ULONG mss = 0;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
#else
  NDIS_TCP_LARGE_SEND_OFFLOAD_NET_BUFFER_LIST_INFO lso_info;
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
#endif
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  ULONG remaining;
  ULONG frags = 0;
  BOOLEAN coalesce_required = FALSE;
  PVOID coalesce_buf;
  ULONG coalesce_remaining = 0;
  grant_ref_t gref;
  ULONG tx_length = 0;
#if NTDDI_VERSION < NTDDI_VISTA
  UINT mdl_count;
#endif
  gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
  if (gref == INVALID_GRANT_REF)
  {
    FUNCTION_MSG("out of grefs\n");
    return FALSE;
  }
  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
  if (!coalesce_buf)
  {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    FUNCTION_MSG("out of memory\n");
    return FALSE;
  }
  XenNet_ClearPacketInfo(&pi);
#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryPacket(packet, NULL, (PUINT)&mdl_count, &pi.first_mdl, (PUINT)&pi.total_length);
  pi.curr_mdl = pi.first_mdl;
#else
  /* create a new MDL over the data portion of the first MDL in the packet... it's just easier this way */
  IoBuildPartialMdl(packet->CurrentMdl,
    &pi.first_mdl_storage,
    (PUCHAR)MmGetMdlVirtualAddress(packet->CurrentMdl) + packet->CurrentMdlOffset,
    MmGetMdlByteCount(packet->CurrentMdl) - packet->CurrentMdlOffset);
  pi.total_length = packet->DataLength;
  pi.first_mdl_storage.Next = packet->CurrentMdl->Next;
  pi.first_mdl = pi.curr_mdl = &pi.first_mdl_storage;
#endif
  pi.first_mdl_offset = pi.curr_mdl_offset = 0;
  remaining = min(pi.total_length, PAGE_SIZE);
  while (remaining) { /* this much gets put in the header */
    ULONG length = XenNet_QueryData(&pi, remaining);
    remaining -= length;
    XenNet_EatData(&pi, length);
  }
  frags++;
  if (pi.total_length > PAGE_SIZE) { /* these are the frags we care about */
    remaining = pi.total_length - PAGE_SIZE;
    while (remaining) {
      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
      if (length != 0) {
        frags++;
        if (frags > LINUX_MAX_SG_ELEMENTS)
          break; /* worst case there could be hundreds of fragments - leave the loop now */
      }
      remaining -= length;
      XenNet_EatData(&pi, length);
    }
  }
  if (frags > LINUX_MAX_SG_ELEMENTS) {
    frags = LINUX_MAX_SG_ELEMENTS;
    coalesce_required = TRUE;
  }
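  /*
   * The Linux backend only accepts a bounded number of slots per packet
   * (hence LINUX_MAX_SG_ELEMENTS), so when the walk above finds more
   * fragments than that, the payload is copied ("coalesced") into fresh
   * page-sized buffers in the (C) loop below instead of being granted in
   * place. The count is capped as soon as the limit is crossed because the
   * exact excess does not matter.
   */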
  /* if we have enough space on the ring then we have enough ids so no need to check for that */
  if (xi->tx_ring_free < frags + 1) {
    XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
    //FUNCTION_MSG("Full on send - ring full\n");
    return FALSE;
  }

  XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
  remaining = pi.total_length - pi.header_length;

  if (pi.ip_version == 4 && pi.ip_proto == 6 && pi.ip4_length == 0) {
    *((PUSHORT)(pi.header + 0x10)) = GET_NET_USHORT((USHORT)pi.total_length - XN_HDR_SIZE);
  }
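  /*
   * Note on the fixup above: Windows may leave the IPv4 total-length field
   * zero on large-send packets, which some backends reject. Offset 0x10 is
   * that field counted from the start of the Ethernet frame:
   *
   *   XN_HDR_SIZE (14, Ethernet header) + 2 (version/IHL + TOS bytes) = 0x10
   *
   * and the value written is the frame length minus the Ethernet header,
   * i.e. the IP datagram length, in network byte order via GET_NET_USHORT.
   */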
#if NTDDI_VERSION < NTDDI_VISTA
  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP) {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4) {
      if (csum_info->Transmit.NdisPacketTcpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      } else if (csum_info->Transmit.NdisPacketUdpChecksum) {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      }
    }
  }
#else
  csum_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpIpChecksumNetBufferListInfo);
  if (csum_info.Transmit.IsIPv4) {
    if (csum_info.Transmit.TcpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    } else if (csum_info.Transmit.UdpChecksum) {
      flags |= NETTXF_csum_blank | NETTXF_data_validated;
    }
  } else if (csum_info.Transmit.IsIPv6) {
    KdPrint((__DRIVER_NAME " Transmit.IsIPv6 not supported\n"));
  }
#endif
#if NTDDI_VERSION < NTDDI_VISTA
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));
#else
  lso_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(packet), TcpLargeSendNetBufferListInfo);
  switch (lso_info.Transmit.Type) {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    mss = lso_info.LsoV1Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    mss = lso_info.LsoV2Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  }
#endif
  if (mss && pi.parse_result == PARSE_OK) {
    ndis_lso = TRUE;
  }

  if (ndis_lso) {
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss) {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
  }
  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */
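  /*
   * For a GSO packet the ring therefore ends up looking roughly like this
   * (one slot per line; slot positions illustrative only):
   *
   *   slot 0: tx0  flags = csum_blank|data_validated|extra_info|more_data
   *   slot 1: ei   XEN_NETIF_EXTRA_TYPE_GSO, u.gso.size = mss
   *   slot 2: txN  gref -> payload page, flags = more_data
   *   slot 3: txN  gref -> payload page, flags = 0 (last)
   *
   * tx0->size grows to the full packet length as (C) slots are added, and
   * only the final request has NETTXF_more_data cleared.
   */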
  /* (A) */
  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
  NT_ASSERT(tx0); /* this will never happen */
  tx0->flags = flags;
  tx_length += pi.header_length;

  /* lso implies IpHeaderChecksum */
#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#else
  if (ndis_lso || csum_info.Transmit.IpHeaderChecksum) {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
#endif
  txN = tx0;
  /* (B) */
  if (xen_gso) {
    NT_ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
    //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx_ring.req_prod_pvt));
    xi->tx_ring.req_prod_pvt++;
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
  }
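  /*
   * The extra_info slot is not a data request: it carries no grant and no
   * shadow id, so nothing is taken from the freelist for it, but it still
   * consumes a ring entry - which is what the "frags + 1" headroom check
   * above leaves room for.
   */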
  NT_ASSERT(xi->current_sg_supported || !remaining);

  /* (C) - only if data is remaining */
  coalesce_buf = NULL;
  while (remaining > 0) {
    ULONG length;
    PFN_NUMBER pfn;

    NT_ASSERT(pi.curr_mdl);
    if (coalesce_required) {
      PVOID va;
      if (!coalesce_buf) {
        gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
        if (gref == INVALID_GRANT_REF) {
          KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
          break;
        }
        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
        if (!coalesce_buf) {
          XnFreeGrant(xi->handle, gref, (ULONG)'XNTX');
          KdPrint((__DRIVER_NAME " out of memory - partial send\n"));
          break;
        }
        coalesce_remaining = min(PAGE_SIZE, remaining);
      }
      length = XenNet_QueryData(&pi, coalesce_remaining);
      va = NdisBufferVirtualAddressSafe(pi.curr_mdl, LowPagePriority);
      if (!va) {
        KdPrint((__DRIVER_NAME " failed to map buffer va - partial send\n"));
        coalesce_remaining = 0;
        remaining -= min(PAGE_SIZE, remaining);
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
      } else {
        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
        coalesce_remaining -= length;
      }
    } else {
      length = XenNet_QueryData(&pi, PAGE_SIZE);
    }
    if (!length || coalesce_remaining) { /* sometimes there are zero length buffers... */
      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
      continue;
    }

    if (coalesce_buf) {
      if (remaining) {
        txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
        NT_ASSERT(txN);
        coalesce_buf = NULL;
        tx_length += min(PAGE_SIZE, remaining);
        remaining -= min(PAGE_SIZE, remaining);
      }
    } else {
      ULONG offset;

      gref = XnAllocateGrant(xi->handle, (ULONG)'XNTX');
      if (gref == INVALID_GRANT_REF) {
        KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
        break;
      }
      txN = RING_GET_REQUEST(&xi->tx_ring, xi->tx_ring.req_prod_pvt);
      xi->tx_ring.req_prod_pvt++;
      xi->tx_ring_free--;
      txN->id = get_id_from_freelist(xi);
      NT_ASSERT(!xi->tx_shadows[txN->id].cb);
      offset = MmGetMdlByteOffset(pi.curr_mdl) + pi.curr_mdl_offset;
      pfn = MmGetMdlPfnArray(pi.curr_mdl)[offset >> PAGE_SHIFT];
      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
      txN->gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
      NT_ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
      xi->tx_shadows[txN->id].gref = txN->gref;
      //ASSERT(sg->Elements[sg_element].Length > sg_offset);
      txN->size = (USHORT)length;
      NT_ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      NT_ASSERT(txN->size);
      NT_ASSERT(txN->gref != INVALID_GRANT_REF);
      remaining -= length;
      tx_length += length;
    }
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    XenNet_EatData(&pi, length);
  }
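  /*
   * txN is left pointing at the last request placed on the ring, so
   * clearing NETTXF_more_data on it below terminates the chain; every
   * earlier request still carries the flag. tx0 keeps the checksum/GSO
   * flags and has accumulated the total packet size.
   */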
  txN->flags &= ~NETTXF_more_data;
  NT_ASSERT(tx0->size == pi.total_length);
  NT_ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;

#if NTDDI_VERSION < NTDDI_VISTA
  if (ndis_lso) {
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length);
  }
#else
  switch (lso_info.Transmit.Type) {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    lso_info.LsoV1TransmitComplete.TcpPayload = tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length;
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    break;
  }
#endif

  xi->tx_outstanding++;
  return TRUE;
}
/* Called at DISPATCH_LEVEL with tx_lock held */
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY entry;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
#endif
  int notify;

  if (xi->device_state != DEVICE_STATE_ACTIVE)
    return;

  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
#endif
    if (!XenNet_HWSendPacket(xi, packet)) {
      InsertHeadList(&xi->tx_waiting_pkt_list, entry);
      break;
    }
  }

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx_ring, notify);
  if (notify) {
    XnNotify(xi->handle, xi->event_channel);
  }
}
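/*
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY publishes req_prod_pvt to the shared
 * ring and sets 'notify' only when the backend's req_event falls inside the
 * newly published range, i.e. when it actually went to sleep waiting for
 * more requests. Paraphrasing io/ring.h (RING_IDX arithmetic wraps):
 *
 *   old = sring->req_prod;
 *   wmb();                            // requests visible before req_prod
 *   sring->req_prod = req_prod_pvt;
 *   mb();
 *   notify = (req_prod_pvt - sring->req_event) < (req_prod_pvt - old);
 */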
// Called at DISPATCH_LEVEL
VOID
XenNet_TxBufferGC(struct xennet_info *xi, BOOLEAN dont_set_event) {
  RING_IDX cons, prod;
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET head = NULL, tail = NULL;
  PNDIS_PACKET packet;
#else
  PNET_BUFFER_LIST nbl_head = NULL;
  PNET_BUFFER_LIST nbl_tail = NULL;
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER packet;
#endif
  ULONG tx_packets = 0;

  NT_ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }
  do {
    prod = xi->tx_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx_ring.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      tx_shadow_t *shadow;

      txrsp = RING_GET_RESPONSE(&xi->tx_ring, cons);

      xi->tx_ring_free++;

      if (txrsp->status == NETIF_RSP_NULL) {
        continue;
      }

      shadow = &xi->tx_shadows[txrsp->id];
      if (shadow->cb) {
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
        shadow->cb = NULL;
      }

      if (shadow->gref != INVALID_GRANT_REF) {
        XnEndAccess(xi->handle, shadow->gref, FALSE, (ULONG)'XNTX');
        shadow->gref = INVALID_GRANT_REF;
      }
      if (shadow->packet) {
        PMDL mdl;
        PUCHAR header;
        packet = shadow->packet;
#if NTDDI_VERSION < NTDDI_VISTA
        mdl = NDIS_PACKET_FIRST_NDIS_BUFFER(packet);
#else
        mdl = NET_BUFFER_CURRENT_MDL(packet);
#endif
#pragma warning(suppress:28193) /* already mapped so guaranteed to work */
        header = MmGetSystemAddressForMdlSafe(mdl, LowPagePriority);
#if NTDDI_VERSION >= NTDDI_VISTA
        header += NET_BUFFER_CURRENT_MDL_OFFSET(packet);

        xi->stats.ifHCOutOctets += packet->DataLength;
        if (packet->DataLength < XN_HDR_SIZE || !(header[0] & 0x01)) {
          /* unicast or tiny packet */
          xi->stats.ifHCOutUcastPkts++;
          xi->stats.ifHCOutUcastOctets += packet->DataLength;
        }
        else if (header[0] == 0xFF && header[1] == 0xFF && header[2] == 0xFF
            && header[3] == 0xFF && header[4] == 0xFF && header[5] == 0xFF) {
          /* broadcast */
          xi->stats.ifHCOutBroadcastPkts++;
          xi->stats.ifHCOutBroadcastOctets += packet->DataLength;
        } else {
          /* multicast */
          xi->stats.ifHCOutMulticastPkts++;
          xi->stats.ifHCOutMulticastOctets += packet->DataLength;
        }
#endif
#if NTDDI_VERSION < NTDDI_VISTA
        PACKET_NEXT_PACKET(packet) = NULL;
        if (!head) {
          head = packet;
        } else {
          PACKET_NEXT_PACKET(tail) = packet;
        }
        tail = packet;
#else
        nbl = NB_NBL(packet);
        NBL_REF(nbl)--;
        if (!NBL_REF(nbl)) {
          NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
          if (nbl_head) {
            NET_BUFFER_LIST_NEXT_NBL(nbl_tail) = nbl;
            nbl_tail = nbl;
          } else {
            nbl_head = nbl;
            nbl_tail = nbl;
          }
        }
#endif
        shadow->packet = NULL;
        tx_packets++;
      }
      put_id_on_freelist(xi, txrsp->id);
    }
    xi->tx_ring.rsp_cons = prod;
    /* resist the temptation to set the event more than +1... it breaks things */
    if (!dont_set_event)
      xi->tx_ring.sring->rsp_event = prod + 1;
    KeMemoryBarrier();
  } while (prod != xi->tx_ring.sring->rsp_prod);
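  /*
   * Setting rsp_event to prod + 1 asks the backend for an event as soon as
   * the next response lands, and the loop condition then re-reads rsp_prod
   * so that a response slipping in between draining the ring and setting
   * the event is not missed - the standard "final check" pattern from
   * io/ring.h.
   */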
  /* if queued packets, send them now */
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* must be done without holding any locks */
#if NTDDI_VERSION < NTDDI_VISTA
  while (head) {
    packet = (PNDIS_PACKET)head;
    head = PACKET_NEXT_PACKET(packet);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
  }
#else
  if (nbl_head)
    NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl_head, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
#endif

  /* must be done after we have truly given back all packets */
  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
  xi->tx_outstanding -= tx_packets;
  if (xi->device_state != DEVICE_STATE_ACTIVE && !xi->tx_outstanding) {
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  }
  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
}
#if NTDDI_VERSION < NTDDI_VISTA
VOID
XenNet_SendPackets(NDIS_HANDLE MiniportAdapterContext, PPNDIS_PACKET PacketArray, UINT NumberOfPackets) {
  struct xennet_info *xi = MiniportAdapterContext;
  PNDIS_PACKET packet;
  UINT i;
  PLIST_ENTRY entry;
  KIRQL old_irql;

  if (xi->device_state != DEVICE_STATE_ACTIVE) {
    for (i = 0; i < NumberOfPackets; i++) {
      NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
    }
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  for (i = 0; i < NumberOfPackets; i++) {
    packet = PacketArray[i];
    ASSERT(packet);
    entry = &PACKET_LIST_ENTRY(packet);
    InsertTailList(&xi->tx_waiting_pkt_list, entry);
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#else
// called at <= DISPATCH_LEVEL
VOID
XenNet_SendNetBufferLists(
    NDIS_HANDLE adapter_context,
    PNET_BUFFER_LIST nb_lists,
    NDIS_PORT_NUMBER port_number,
    ULONG send_flags) {
  struct xennet_info *xi = adapter_context;
  PLIST_ENTRY nb_entry;
  KIRQL old_irql;
  PNET_BUFFER_LIST curr_nbl;
  PNET_BUFFER_LIST next_nbl;

  UNREFERENCED_PARAMETER(port_number);

  if (xi->device_state == DEVICE_STATE_INACTIVE) {
    for (curr_nbl = nb_lists; curr_nbl; curr_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl)) {
      curr_nbl->Status = NDIS_STATUS_FAILURE;
    }
    /* this actions the whole list */
    NdisMSendNetBufferListsComplete(xi->adapter_handle, nb_lists, (send_flags & NDIS_SEND_FLAGS_DISPATCH_LEVEL) ? NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL : 0);
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  for (curr_nbl = nb_lists; curr_nbl; curr_nbl = next_nbl) {
    PNET_BUFFER curr_nb;
    NBL_REF(curr_nbl) = 0;
    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    NET_BUFFER_LIST_NEXT_NBL(curr_nbl) = NULL;
    for (curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl); curr_nb; curr_nb = NET_BUFFER_NEXT_NB(curr_nb)) {
      NB_NBL(curr_nb) = curr_nbl;
      nb_entry = &NB_LIST_ENTRY(curr_nb);
      InsertTailList(&xi->tx_waiting_pkt_list, nb_entry);
      NBL_REF(curr_nbl)++;
    }
  }
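  /*
   * Completion of each NBL is gated by a per-NBL reference count: every
   * NET_BUFFER queued above bumps NBL_REF, XenNet_TxBufferGC drops it as
   * individual buffers complete, and the NBL goes back to NDIS only when
   * the count reaches zero.
   */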
  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
}
#endif
#if 0
VOID
XenNet_CancelSend(NDIS_HANDLE adapter_context, PVOID cancel_id)
{
  UNREFERENCED_PARAMETER(adapter_context);
  UNREFERENCED_PARAMETER(cancel_id);
  FUNCTION_ENTER();

  FUNCTION_EXIT();
}
#endif
BOOLEAN
XenNet_TxInit(xennet_info_t *xi) {
  USHORT i;

  KeInitializeSpinLock(&xi->tx_lock);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;

  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
    PAGE_SIZE, XENNET_POOL_TAG, 0);

  xi->tx_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++) {
    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
    xi->tx_shadows[i].cb = NULL;
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}
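/*
 * One id per ring slot is what makes the "enough ring space implies enough
 * ids" shortcut in XenNet_HWSendPacket hold: tx_id_free and tx_ring_free
 * both start at NET_TX_RING_SIZE, and every data request consumes one of
 * each while the GSO extra_info slot consumes a ring entry only, so the
 * free id count can never fall below the free ring count.
 */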
/*
  The ring is completely closed down now. We just need to empty anything left
  on our freelists and harvest anything left on the rings.
*/
BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi) {
#if NTDDI_VERSION < NTDDI_VISTA
  PNDIS_PACKET packet;
#else
  PNET_BUFFER packet;
  PNET_BUFFER_LIST nbl;
#endif
  PLIST_ENTRY entry;
  LARGE_INTEGER timeout;
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  while (xi->tx_outstanding) {
    KeReleaseSpinLock(&xi->tx_lock, old_irql);
    KdPrint((__DRIVER_NAME " Waiting for %d remaining packets to be sent\n", xi->tx_outstanding));
    timeout.QuadPart = -1 * 1 * 1000 * 1000 * 10; /* 1 second */
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, &timeout);
    KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  }
  KeReleaseSpinLock(&xi->tx_lock, old_irql);
  /* Free packets in tx queue */
  while (!IsListEmpty(&xi->tx_waiting_pkt_list)) {
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
#if NTDDI_VERSION < NTDDI_VISTA
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, PACKET_LIST_ENTRY_FIELD);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
#else
    packet = CONTAINING_RECORD(entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
    nbl = NB_NBL(packet);
    NBL_REF(nbl)--;
    if (!NBL_REF(nbl)) {
      nbl->Status = NDIS_STATUS_FAILURE;
      NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
    }
#endif
  }
  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);

  FUNCTION_EXIT();

  return TRUE;
}