win-pvdrivers

view xennet/xennet6_tx.c @ 979:8f483a2b2991

Fix up PREfast warnings
author James Harper <james.harper@bendigoit.com.au>
date Sun Apr 15 19:47:10 2012 +1000 (2012-04-15)
parents 941699790045
children 20ea0ca954e4

/*
PV Net Driver for Windows Xen HVM Domains
Copyright (C) 2007 James Harper
Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include "xennet6.h"

static USHORT
get_id_from_freelist(struct xennet_info *xi)
{
  ASSERT(xi->tx_id_free);
  xi->tx_id_free--;

  return xi->tx_id_list[xi->tx_id_free];
}

static VOID
put_id_on_freelist(struct xennet_info *xi, USHORT id)
{
  xi->tx_id_list[xi->tx_id_free] = id;
  xi->tx_id_free++;
}
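
/* The id freelist is a simple LIFO stack of free tx ring ids. An
   illustrative (not compiled) sequence, assuming XenNet_TxInit has already
   pushed ids 0..NET_TX_RING_SIZE-1: */
#if 0
  USHORT a = get_id_from_freelist(xi); /* pops NET_TX_RING_SIZE - 1 */
  USHORT b = get_id_from_freelist(xi); /* pops NET_TX_RING_SIZE - 2 */
  put_id_on_freelist(xi, a);           /* a will be popped next */
#endif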

#define SWAP_USHORT(x) (USHORT)((((x) & 0xFF) << 8)|(((x) >> 8) & 0xFF))
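/* byte-swaps a USHORT, e.g. SWAP_USHORT(0x1234) == 0x3412 - converts
   between host and network byte order */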

/* Grant the backend access to a coalesce buffer, consume a tx id for it,
   and put a request for it on the ring. Returns the new (not yet pushed)
   request. */
static __forceinline struct netif_tx_request *
XenNet_PutCbOnRing(struct xennet_info *xi, PVOID coalesce_buf, ULONG length, grant_ref_t gref)
{
  struct netif_tx_request *tx;
  tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
  xi->tx.req_prod_pvt++;
  xi->tx_ring_free--;
  tx->id = get_id_from_freelist(xi);
  ASSERT(xi->tx_shadows[tx->id].gref == INVALID_GRANT_REF);
  ASSERT(!xi->tx_shadows[tx->id].cb);
  xi->tx_shadows[tx->id].cb = coalesce_buf;
  tx->gref = xi->vectors.GntTbl_GrantAccess(xi->vectors.context, (ULONG)(MmGetPhysicalAddress(coalesce_buf).QuadPart >> PAGE_SHIFT), FALSE, gref, (ULONG)'XNTX');
  xi->tx_shadows[tx->id].gref = tx->gref;
  tx->offset = 0;
  tx->size = (USHORT)length;
  ASSERT(tx->offset + tx->size <= PAGE_SIZE);
  ASSERT(tx->size);
  return tx;
}

/* Called at DISPATCH_LEVEL with tx_lock held */
/*
 * Send one NET_BUFFER. This may involve multiple entries on the TX ring.
 */
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNET_BUFFER nb)
{
  struct netif_tx_request *tx0 = NULL;
  struct netif_tx_request *txN = NULL;
  struct netif_extra_info *ei = NULL;
  NDIS_TCP_LARGE_SEND_OFFLOAD_NET_BUFFER_LIST_INFO lso_info;
  ULONG mss = 0;
  NDIS_TCP_IP_CHECKSUM_NET_BUFFER_LIST_INFO csum_info;
  uint16_t flags = NETTXF_more_data;
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;
  BOOLEAN xen_gso = FALSE;
  ULONG remaining;
  ULONG frags = 0;
  BOOLEAN coalesce_required = FALSE;
  PVOID coalesce_buf;
  ULONG coalesce_remaining = 0;
  grant_ref_t gref;
  ULONG tx_length = 0;

  //FUNCTION_ENTER();

  gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
  if (gref == INVALID_GRANT_REF)
  {
    FUNCTION_MSG("out of grefs\n");
    return FALSE;
  }
  coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
  if (!coalesce_buf)
  {
    xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
    FUNCTION_MSG("out of memory\n");
    return FALSE;
  }
  XenNet_ClearPacketInfo(&pi);
  //NdisQueryPacket(packet, NULL, (PUINT)&pi.mdl_count, &pi.first_buffer, (PUINT)&pi.total_length);
  /* create a new MDL over the data portion of the first MDL in the packet... it's just easier this way */
  IoBuildPartialMdl(nb->CurrentMdl,
    &pi.first_mdl_storage,
    (PUCHAR)MmGetMdlVirtualAddress(nb->CurrentMdl) + nb->CurrentMdlOffset,
    MmGetMdlByteCount(nb->CurrentMdl) - nb->CurrentMdlOffset);
  pi.first_mdl_storage.Next = nb->CurrentMdl->Next;
  pi.first_mdl = pi.curr_mdl = &pi.first_mdl_storage;
  pi.first_mdl_offset = pi.curr_mdl_offset = 0;
  pi.total_length = nb->DataLength;
  remaining = min(pi.total_length, PAGE_SIZE);
  while (remaining) /* this much gets put in the header */
  {
    ULONG length = XenNet_QueryData(&pi, remaining);
    remaining -= length;
    XenNet_EatData(&pi, length);
  }
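  /* the walk above (and the one below) only counts how many ring slots the
     packet will need; the header bytes themselves are parsed into
     coalesce_buf by XenNet_ParsePacketHeader further down */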
  frags++;
  if (pi.total_length > PAGE_SIZE) /* these are the frags we care about */
  {
    remaining = pi.total_length - PAGE_SIZE;
    while (remaining)
    {
      ULONG length = XenNet_QueryData(&pi, PAGE_SIZE);
      if (length != 0)
      {
        frags++;
        if (frags > LINUX_MAX_SG_ELEMENTS)
          break; /* worst case there could be hundreds of fragments - leave the loop now */
      }
      remaining -= length;
      XenNet_EatData(&pi, length);
    }
  }
  if (frags > LINUX_MAX_SG_ELEMENTS)
  {
    frags = LINUX_MAX_SG_ELEMENTS;
    coalesce_required = TRUE;
  }
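
  /* Illustration (assuming 4KB pages and a contiguous buffer): a 9000-byte
     packet costs one slot for the first PAGE_SIZE bytes plus two more for
     the remaining 4904, i.e. frags == 3. Only when the MDL chain is so
     fragmented that frags would exceed LINUX_MAX_SG_ELEMENTS do we fall
     back to coalescing pages into copy buffers below. */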

  /* if we have enough space on the ring then we have enough id's so no need to check for that */
  /* +1 leaves room for a possible GSO extra_info slot, which consumes a ring entry but no id */
  if (xi->tx_ring_free < frags + 1)
  {
    xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
    NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
    //KdPrint((__DRIVER_NAME " Full on send - ring full\n"));
    return FALSE;
  }
  XenNet_ParsePacketHeader(&pi, coalesce_buf, PAGE_SIZE);
  remaining = pi.total_length - pi.header_length;
  if (pi.ip_version == 4 && pi.ip_proto == 6 && pi.ip4_length == 0)
  {
#if 0
    PMDL tmp_mdl;
    PUCHAR my_va;
    FUNCTION_MSG("total_length == 0, CurrentMdlOffset == %d\n", nb->CurrentMdlOffset);
    my_va = MmGetMdlVirtualAddress(pi.first_mdl);
    FUNCTION_MSG(" first mdl va = %p, offset = %d, length = %d\n", MmGetMdlVirtualAddress(pi.first_mdl), MmGetMdlByteOffset(pi.first_mdl), MmGetMdlByteCount(pi.first_mdl));
    FUNCTION_MSG(" 0010: %02x%02x\n", my_va[0x10], my_va[0x11]);
    for (tmp_mdl = nb->CurrentMdl; tmp_mdl; tmp_mdl = tmp_mdl->Next)
    {
      FUNCTION_MSG(" mdl = %p, va = %p, offset = %d, length = %d\n", tmp_mdl, MmGetMdlVirtualAddress(tmp_mdl), MmGetMdlByteOffset(tmp_mdl), MmGetMdlByteCount(tmp_mdl));
      if (tmp_mdl == nb->CurrentMdl)
      {
        my_va = MmGetSystemAddressForMdlSafe(tmp_mdl, HighPagePriority);
        my_va += nb->CurrentMdlOffset;
        FUNCTION_MSG(" 0010: %02x%02x\n", my_va[0x10], my_va[0x11]);
      }
    }
#endif
    /* TCP over IPv4 with a zero IPv4 total-length field - fill it in */
    *((PUSHORT)(pi.header + 0x10)) = GET_NET_USHORT((USHORT)(nb->DataLength - XN_HDR_SIZE));
  }

  // do we need to check if the packet is tcpip??
  csum_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(nb), TcpIpChecksumNetBufferListInfo);
  if (csum_info.Transmit.IsIPv4)
  {
#if 0
    if (csum_info.Transmit.IpHeaderChecksum && !xi->setting_csum.V4Transmit.IpChecksum)
    {
      KdPrint((__DRIVER_NAME " IpChecksum not enabled\n"));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //return TRUE;
    }
#endif
    if (csum_info.Transmit.TcpChecksum)
    {
      if (1) //xi->setting_csum.V4Transmit.TcpChecksum)
      {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      }
      else
      {
        KdPrint((__DRIVER_NAME " TcpChecksum not enabled\n"));
        //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
        //return TRUE;
      }
    }
    else if (csum_info.Transmit.UdpChecksum)
    {
      if (1) //xi->setting_csum.V4Transmit.UdpChecksum)
      {
        flags |= NETTXF_csum_blank | NETTXF_data_validated;
      }
      else
      {
        KdPrint((__DRIVER_NAME " UdpChecksum not enabled\n"));
        //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
        //return TRUE;
      }
    }
  }
  else if (csum_info.Transmit.IsIPv6)
  {
    KdPrint((__DRIVER_NAME " Transmit.IsIPv6 not supported\n"));
    //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
    //return TRUE;
  }
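
  /* NETTXF_csum_blank tells the backend the transport checksum field has
     not been filled in; NETTXF_data_validated that the data needs no
     further checking - together these hand checksum offload to the
     backend */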

  lso_info.Value = NET_BUFFER_LIST_INFO(NB_NBL(nb), TcpLargeSendNetBufferListInfo);
  switch (lso_info.Transmit.Type)
  {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    mss = lso_info.LsoV1Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    mss = lso_info.LsoV2Transmit.MSS;
    /* should make use of TcpHeaderOffset too... maybe just assert if it's not what we expect */
    break;
  }
  if (mss && pi.parse_result == PARSE_OK)
  {
    //FUNCTION_MSG("lso mss = %d\n", mss);
    //if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) != NDIS_PROTOCOL_ID_TCP_IP)
    //{
    //  KdPrint((__DRIVER_NAME " mss specified when packet is not NDIS_PROTOCOL_ID_TCP_IP\n"));
    //}
    ndis_lso = TRUE;
#if 0
    if (mss > xi->current_lso_ipv4)
    {
      KdPrint((__DRIVER_NAME " Requested MSS (%d) larger than allowed MSS (%d)\n", mss, xi->current_lso_ipv4));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //FUNCTION_EXIT();
      return TRUE;
    }
#endif
  }

  if (ndis_lso)
  {
    flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used */
    if (pi.tcp_length >= mss)
    {
      flags |= NETTXF_extra_info;
      xen_gso = TRUE;
    }
    else
    {
      KdPrint((__DRIVER_NAME " large send specified when tcp_length < mss\n"));
    }
  }
  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */
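  /* For a GSO packet the ring ends up looking like this (slots relative to
     req_prod_pvt on entry):
       slot 0:    tx0 - header coalesce buffer; carries the csum/GSO flags,
                  and its size field grows below to the full packet length
       slot 1:    extra_info with u.gso.size = mss (only when xen_gso)
       slot 2..n: one request per data fragment, each flagged
                  NETTXF_more_data except the last */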

  /* (A) */
  tx0 = XenNet_PutCbOnRing(xi, coalesce_buf, pi.header_length, gref);
  ASSERT(tx0); /* this will never happen */
  tx0->flags = flags;
  tx_length += pi.header_length;

  /* lso implies IpHeaderChecksum */
  if (ndis_lso || csum_info.Transmit.IpHeaderChecksum)
  {
    XenNet_SumIpHeader(coalesce_buf, pi.ip4_header_length);
  }
  txN = tx0;

  /* (B) */
  if (xen_gso)
  {
    ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    //KdPrint((__DRIVER_NAME " pos = %d\n", xi->tx.req_prod_pvt));
    xi->tx.req_prod_pvt++;
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
  }
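  /* note the extra_info slot consumes a ring entry but no tx id or shadow;
     the backend completes it with a NETIF_RSP_NULL status, which
     XenNet_TxBufferGC recognises and skips */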

  ASSERT(xi->current_sg_supported || !remaining);

  /* (C) - only if data is remaining */
  coalesce_buf = NULL;
  while (remaining > 0)
  {
    ULONG length;
    PFN_NUMBER pfn;

    ASSERT(pi.curr_mdl);
    if (coalesce_required)
    {
      PVOID va;
      if (!coalesce_buf)
      {
        gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
        if (gref == INVALID_GRANT_REF)
        {
          KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
          break;
        }
        coalesce_buf = NdisAllocateFromNPagedLookasideList(&xi->tx_lookaside_list);
        if (!coalesce_buf)
        {
          xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
          KdPrint((__DRIVER_NAME " out of memory - partial send\n"));
          break;
        }
        coalesce_remaining = min(PAGE_SIZE, remaining);
      }
      length = XenNet_QueryData(&pi, coalesce_remaining);
      va = NdisBufferVirtualAddressSafe(pi.curr_mdl, LowPagePriority);
      if (!va)
      {
        KdPrint((__DRIVER_NAME " failed to map buffer va - partial send\n"));
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, coalesce_buf);
        xi->vectors.GntTbl_PutRef(xi->vectors.context, gref, (ULONG)'XNTX');
        coalesce_buf = NULL;
        /* bail out the same way as the other partial-send paths; carrying
           on here risked queueing the just-freed coalesce buffer */
        break;
      }
      else
      {
        memcpy((PUCHAR)coalesce_buf + min(PAGE_SIZE, remaining) - coalesce_remaining, (PUCHAR)va + pi.curr_mdl_offset, length);
        coalesce_remaining -= length;
      }
    }
    else
    {
      length = XenNet_QueryData(&pi, PAGE_SIZE);
    }
    if (!length || coalesce_remaining) /* sometimes there are zero length buffers... */
    {
      XenNet_EatData(&pi, length); /* do this so we actually move to the next buffer */
      continue;
    }

    if (coalesce_buf)
    {
      if (remaining)
      {
        txN = XenNet_PutCbOnRing(xi, coalesce_buf, min(PAGE_SIZE, remaining), gref);
        ASSERT(txN);
        coalesce_buf = NULL;
        tx_length += min(PAGE_SIZE, remaining);
        remaining -= min(PAGE_SIZE, remaining);
      }
    }
    else
    {
      ULONG offset;

      gref = xi->vectors.GntTbl_GetRef(xi->vectors.context, (ULONG)'XNTX');
      if (gref == INVALID_GRANT_REF)
      {
        KdPrint((__DRIVER_NAME " out of grefs - partial send\n"));
        break;
      }
      txN = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
      xi->tx.req_prod_pvt++;
      xi->tx_ring_free--;
      txN->id = get_id_from_freelist(xi);
      ASSERT(!xi->tx_shadows[txN->id].cb);
      offset = MmGetMdlByteOffset(pi.curr_mdl) + pi.curr_mdl_offset;
      pfn = MmGetMdlPfnArray(pi.curr_mdl)[offset >> PAGE_SHIFT];
      txN->offset = (USHORT)offset & (PAGE_SIZE - 1);
      txN->gref = xi->vectors.GntTbl_GrantAccess(xi->vectors.context, (ULONG)pfn, FALSE, gref, (ULONG)'XNTX');
      ASSERT(xi->tx_shadows[txN->id].gref == INVALID_GRANT_REF);
      xi->tx_shadows[txN->id].gref = txN->gref;
      //ASSERT(sg->Elements[sg_element].Length > sg_offset);
      txN->size = (USHORT)length;
      ASSERT(txN->offset + txN->size <= PAGE_SIZE);
      ASSERT(txN->size);
      ASSERT(txN->gref != INVALID_GRANT_REF);
      remaining -= length;
      tx_length += length;
    }
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    XenNet_EatData(&pi, length);
  }
  /* the final request of the packet must not have more_data set */
  txN->flags &= ~NETTXF_more_data;
  ASSERT(tx0->size == pi.total_length);
  ASSERT(!xi->tx_shadows[txN->id].nb);
  xi->tx_shadows[txN->id].nb = nb;

  switch (lso_info.Transmit.Type)
  {
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
    lso_info.LsoV1TransmitComplete.TcpPayload = tx_length - MAX_ETH_HEADER_LENGTH - pi.ip4_header_length - pi.tcp_header_length;
    break;
  case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
    break;
  }

  //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_SUCCESS);
  //FUNCTION_EXIT();
  xi->tx_outstanding++;
  //total_sent++;
  //FUNCTION_MSG("sent packet\n");
  return TRUE;
}

/* Called at DISPATCH_LEVEL with tx_lock held */
static VOID
XenNet_SendQueuedPackets(struct xennet_info *xi)
{
  PLIST_ENTRY nb_entry;
  PNET_BUFFER nb;
  int notify;

  //FUNCTION_ENTER();

  ASSERT(!KeTestSpinLock(&xi->tx_lock));
  if (xi->device_state->suspend_resume_state_pdo != SR_STATE_RUNNING)
    return;

  while (!IsListEmpty(&xi->tx_waiting_pkt_list))
  {
    nb_entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
    nb = CONTAINING_RECORD(nb_entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);

    if (!XenNet_HWSendPacket(xi, nb))
    {
      //KdPrint((__DRIVER_NAME " No room for packet\n"));
      InsertHeadList(&xi->tx_waiting_pkt_list, nb_entry);
      break;
    }
  }
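
  /* publish req_prod_pvt to the shared ring; the macro sets notify only if
     the backend asked to be woken for new requests (its req_event), so the
     event channel is not kicked on every send */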
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
  if (notify)
  {
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->event_channel);
  }
  //FUNCTION_EXIT();
}

// Called at DISPATCH_LEVEL
VOID
XenNet_TxBufferGC(struct xennet_info *xi, BOOLEAN dont_set_event)
{
  RING_IDX cons, prod;
  PNET_BUFFER_LIST nbl_head = NULL;
  PNET_BUFFER_LIST nbl_tail = NULL;
  PNET_BUFFER_LIST nbl;
  PNET_BUFFER nb;
  ULONG tx_packets = 0;

  //FUNCTION_ENTER();

  if (!xi->connected)
    return; /* a delayed DPC could let this come through... just do nothing */
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  // InitializeListHead(&nbl_head);
  if (xi->tx_shutting_down && !xi->tx_outstanding)
  {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }

  do {
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;
      tx_shadow_t *shadow;

      txrsp = RING_GET_RESPONSE(&xi->tx, cons);

      xi->tx_ring_free++;

      if (txrsp->status == NETIF_RSP_NULL)
      {
        continue;
      }

      shadow = &xi->tx_shadows[txrsp->id];
      if (shadow->cb)
      {
        NdisFreeToNPagedLookasideList(&xi->tx_lookaside_list, shadow->cb);
        shadow->cb = NULL;
      }

      if (shadow->gref != INVALID_GRANT_REF)
      {
        xi->vectors.GntTbl_EndAccess(xi->vectors.context,
          shadow->gref, FALSE, (ULONG)'XNTX');
        shadow->gref = INVALID_GRANT_REF;
      }

      if (shadow->nb)
      {
        PMDL mdl;
        PUCHAR header;
        nb = shadow->nb;
        mdl = NET_BUFFER_CURRENT_MDL(nb);
        #pragma warning(suppress:28193) /* already mapped so guaranteed to work */
        header = MmGetSystemAddressForMdlSafe(mdl, LowPagePriority);
        header += NET_BUFFER_CURRENT_MDL_OFFSET(nb);

        xi->stats.ifHCOutOctets += nb->DataLength;
        if (nb->DataLength < XN_HDR_SIZE || !(header[0] & 0x01))
        {
          /* unicast or tiny packet */
          xi->stats.ifHCOutUcastPkts++;
          xi->stats.ifHCOutUcastOctets += nb->DataLength;
        }
        else if (header[0] == 0xFF && header[1] == 0xFF && header[2] == 0xFF
          && header[3] == 0xFF && header[4] == 0xFF && header[5] == 0xFF)
        {
          /* broadcast */
          xi->stats.ifHCOutBroadcastPkts++;
          xi->stats.ifHCOutBroadcastOctets += nb->DataLength;
        }
        else
        {
          /* multicast */
          xi->stats.ifHCOutMulticastPkts++;
          xi->stats.ifHCOutMulticastOctets += nb->DataLength;
        }

        nbl = NB_NBL(nb);
        NBL_REF(nbl)--;
        if (!NBL_REF(nbl))
        {
          NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL;
          if (nbl_head)
          {
            NET_BUFFER_LIST_NEXT_NBL(nbl_tail) = nbl;
            nbl_tail = nbl;
          }
          else
          {
            nbl_head = nbl;
            nbl_tail = nbl;
          }
        }
        shadow->nb = NULL;
        tx_packets++;
      }
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx.rsp_cons = prod;
    /* resist the temptation to set the event more than +1... it breaks things */
    if (!dont_set_event)
      xi->tx.sring->rsp_event = prod + 1;
    KeMemoryBarrier();
  } while (prod != xi->tx.sring->rsp_prod);
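
  /* the loop re-reads rsp_prod after writing rsp_event so that a response
     which arrived between the copy of rsp_prod and the rsp_event update is
     processed now rather than waiting for the next interrupt */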

  /* if queued packets, send them now */
  if (!xi->tx_shutting_down)
    XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* must be done without holding any locks */
  if (nbl_head)
    NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl_head, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);

  /* must be done after we have truly given back all packets */
  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);
  xi->tx_outstanding -= tx_packets;
  if (!xi->tx_outstanding && xi->tx_shutting_down)
    KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  if (xi->device_state->suspend_resume_state_pdo == SR_STATE_SUSPENDING
    && xi->device_state->suspend_resume_state_fdo != SR_STATE_SUSPENDING
    && xi->tx_id_free == NET_TX_RING_SIZE)
  {
    KdPrint((__DRIVER_NAME " Setting SR_STATE_SUSPENDING\n"));
    xi->device_state->suspend_resume_state_fdo = SR_STATE_SUSPENDING;
    KdPrint((__DRIVER_NAME " Notifying event channel %d\n", xi->device_state->pdo_event_channel));
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
  }

  //FUNCTION_EXIT();
}

// called at <= DISPATCH_LEVEL
VOID
XenNet_SendNetBufferLists(
  NDIS_HANDLE adapter_context,
  PNET_BUFFER_LIST nb_lists,
  NDIS_PORT_NUMBER port_number,
  ULONG send_flags)
{
  struct xennet_info *xi = adapter_context;
  PLIST_ENTRY nb_entry;
  KIRQL old_irql;
  PNET_BUFFER_LIST curr_nbl;
  PNET_BUFFER_LIST next_nbl;

  UNREFERENCED_PARAMETER(port_number);
  //FUNCTION_ENTER();

  if (xi->inactive)
  {
    for (curr_nbl = nb_lists; curr_nbl; curr_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl))
    {
      KdPrint((__DRIVER_NAME " NBL %p\n", curr_nbl));
      curr_nbl->Status = NDIS_STATUS_FAILURE;
    }
    /* this actions the whole list */
    NdisMSendNetBufferListsComplete(xi->adapter_handle, nb_lists, (send_flags & NDIS_SEND_FLAGS_DISPATCH_LEVEL)?NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL:0);
    return;
  }

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  /* queue each NET_BUFFER individually; NBL_REF counts the NBs still
     outstanding per NBL so the NBL is only completed once its last NB is
     done */
  for (curr_nbl = nb_lists; curr_nbl; curr_nbl = next_nbl)
  {
    PNET_BUFFER curr_nb;
    NBL_REF(curr_nbl) = 0;
    next_nbl = NET_BUFFER_LIST_NEXT_NBL(curr_nbl);
    NET_BUFFER_LIST_NEXT_NBL(curr_nbl) = NULL;
    for (curr_nb = NET_BUFFER_LIST_FIRST_NB(curr_nbl); curr_nb; curr_nb = NET_BUFFER_NEXT_NB(curr_nb))
    {
      NB_NBL(curr_nb) = curr_nbl;
      nb_entry = &NB_LIST_ENTRY(curr_nb);
      InsertTailList(&xi->tx_waiting_pkt_list, nb_entry);
      NBL_REF(curr_nbl)++;
    }
  }

  XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLock(&xi->tx_lock, old_irql);
  //FUNCTION_EXIT();
}

VOID
XenNet_CancelSend(NDIS_HANDLE adapter_context, PVOID cancel_id)
{
  UNREFERENCED_PARAMETER(adapter_context);
  UNREFERENCED_PARAMETER(cancel_id);
#if 0
  struct xennet_info *xi = adapter_context;
  KIRQL old_irql;
  PLIST_ENTRY nb_entry;
  PNDIS_PACKET packet;
  PNDIS_PACKET head = NULL, tail = NULL;
  BOOLEAN result;
#endif
  FUNCTION_ENTER();

#if 0
  KeAcquireSpinLock(&xi->tx_lock, &old_irql);

  nb_entry = xi->tx_waiting_pkt_list.Flink;
  while (nb_entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(nb_entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    nb_entry = nb_entry->Flink;
    if (NDIS_GET_PACKET_CANCEL_ID(packet) == cancel_id)
    {
      KdPrint((__DRIVER_NAME " Found packet to cancel %p\n", packet));
      result = RemoveEntryList((PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)]);
      ASSERT(result);
      *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
      if (head)
        *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
      else
        head = packet;
      tail = packet;
    }
  }

  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    KdPrint((__DRIVER_NAME " NdisMSendComplete(%p)\n", packet));
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_REQUEST_ABORTED);
  }
#endif

  FUNCTION_EXIT();
}

VOID
XenNet_TxResumeStart(xennet_info_t *xi)
{
  UNREFERENCED_PARAMETER(xi);

  FUNCTION_ENTER();
  /* nothing to do here - all packets were already sent */
  FUNCTION_EXIT();
}

VOID
XenNet_TxResumeEnd(xennet_info_t *xi)
{
  KIRQL old_irql;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  //XenNet_SendQueuedPackets(xi);
  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  FUNCTION_EXIT();
}

BOOLEAN
XenNet_TxInit(xennet_info_t *xi)
{
  USHORT i;

  KeInitializeSpinLock(&xi->tx_lock);
  InitializeListHead(&xi->tx_waiting_pkt_list);

  KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
  xi->tx_shutting_down = FALSE;
  xi->tx_outstanding = 0;
  xi->tx_ring_free = NET_TX_RING_SIZE;

  NdisInitializeNPagedLookasideList(&xi->tx_lookaside_list, NULL, NULL, 0,
    PAGE_SIZE, XENNET_POOL_TAG, 0);

  xi->tx_id_free = 0;
  for (i = 0; i < NET_TX_RING_SIZE; i++)
  {
    xi->tx_shadows[i].gref = INVALID_GRANT_REF;
    xi->tx_shadows[i].cb = NULL;
    put_id_on_freelist(xi, i);
  }

  return TRUE;
}
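
/* after init every id 0..NET_TX_RING_SIZE-1 is on the freelist, so
   tx_id_free == NET_TX_RING_SIZE doubles as the "nothing in flight" test
   used by the suspend logic in XenNet_TxBufferGC above */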

/*
The ring is completely closed down now. We just need to empty anything left
on our freelists and harvest anything left on the rings.
*/

BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  //PLIST_ENTRY entry;
  //PNDIS_PACKET packet;
  ////PMDL mdl;
  ////ULONG i;
  KIRQL old_irql;
  PNET_BUFFER nb;
  PNET_BUFFER_LIST nbl;
  PLIST_ENTRY nb_entry;
  LARGE_INTEGER timeout;

  FUNCTION_ENTER();

  KeAcquireSpinLock(&xi->tx_lock, &old_irql);
  xi->tx_shutting_down = TRUE;
  KeReleaseSpinLock(&xi->tx_lock, old_irql);

  while (xi->tx_outstanding)
  {
    KdPrint((__DRIVER_NAME " Waiting for %d remaining packets to be sent\n", xi->tx_outstanding));
    timeout.QuadPart = -1 * 1 * 1000 * 1000 * 10; /* 1 second, in 100ns units */
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, &timeout);
  }

  KeFlushQueuedDpcs();

  /* Free packets in tx queue */
  while (!IsListEmpty(&xi->tx_waiting_pkt_list))
  {
    nb_entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
    nb = CONTAINING_RECORD(nb_entry, NET_BUFFER, NB_LIST_ENTRY_FIELD);
    nbl = NB_NBL(nb);
    NBL_REF(nbl)--;
    if (!NBL_REF(nbl))
    {
      nbl->Status = NDIS_STATUS_FAILURE;
      NdisMSendNetBufferListsComplete(xi->adapter_handle, nbl, NDIS_SEND_COMPLETE_FLAGS_DISPATCH_LEVEL);
    }
  }

  NdisDeleteNPagedLookasideList(&xi->tx_lookaside_list);

  FUNCTION_EXIT();

  return TRUE;
}