win-pvdrivers

view xennet/xennet_tx.c @ 586:bdb2d6b163f3

More protection against the Dpcs running after shutdown
author James Harper <james.harper@bendigoit.com.au>
date Sun Jun 07 20:31:20 2009 +1000 (2009-06-07)
parents dc0a293c870c
children 63a4d934929f
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
23 static USHORT
24 get_id_from_freelist(struct xennet_info *xi)
25 {
26 ASSERT(xi->tx_id_free);
27 xi->tx_id_free--;
29 return xi->tx_id_list[xi->tx_id_free];
30 }
32 static VOID
33 put_id_on_freelist(struct xennet_info *xi, USHORT id)
34 {
35 xi->tx_id_list[xi->tx_id_free] = id;
36 xi->tx_id_free++;
37 }
39 static __inline shared_buffer_t *
40 get_hb_from_freelist(struct xennet_info *xi)
41 {
42 shared_buffer_t *hb;
44 //FUNCTION_ENTER();
45 if (xi->tx_hb_free == 0)
46 {
47 //FUNCTION_EXIT();
48 return NULL;
49 }
50 xi->tx_hb_free--;
51 //KdPrint((__DRIVER_NAME " xi->tx_hb_free = %d\n", xi->tx_hb_free));
52 //KdPrint((__DRIVER_NAME " xi->tx_hb_list[xi->tx_hb_free] = %d\n", xi->tx_hb_list[xi->tx_hb_free]));
53 hb = &xi->tx_hbs[xi->tx_hb_list[xi->tx_hb_free]];
54 //KdPrint((__DRIVER_NAME " hb = %p\n", hb));
55 //FUNCTION_EXIT();
56 return hb;
57 }
59 static __inline VOID
60 put_hb_on_freelist(struct xennet_info *xi, shared_buffer_t *hb)
61 {
62 //FUNCTION_ENTER();
64 //KdPrint((__DRIVER_NAME " hb = %p\n", hb));
65 //KdPrint((__DRIVER_NAME " xi->tx_hb_free = %d\n", xi->tx_hb_free));
66 ASSERT(hb);
67 xi->tx_hb_list[xi->tx_hb_free] = hb->id;
68 xi->tx_hb_free++;
69 //FUNCTION_EXIT();
70 }
/* Byte-swap a 16-bit value. Argument and whole expansion are fully
 * parenthesized: the original `x & 0xFF` / `x >> 8` miscomputed when
 * invoked with a compound expression such as SWAP_USHORT(a + b). */
#define SWAP_USHORT(x) ((USHORT)((((x) & 0xFF) << 8) | (((x) >> 8) & 0xFF)))
74 /* Called at DISPATCH_LEVEL with tx_lock held */
75 /*
76 * Send one NDIS_PACKET. This may involve multiple entries on TX ring.
77 */
static BOOLEAN
XenNet_HWSendPacket(struct xennet_info *xi, PNDIS_PACKET packet)
{
  /* Place one NDIS packet on the Xen tx ring, possibly as several ring
   * requests (header copy + sg fragments + optional GSO extra_info).
   * Returns TRUE when the packet was consumed (sent, or completed/dropped),
   * FALSE when the ring or header-buffer pool is full and the caller should
   * requeue the packet and retry later.
   * Must be called at DISPATCH_LEVEL with tx_lock held. */
  struct netif_tx_request *tx0 = NULL;   /* first request - carries csum flags and total size */
  struct netif_tx_request *txN = NULL;   /* last request placed - carries the shadow id */
  struct netif_extra_info *ei = NULL;
  PNDIS_TCP_IP_CHECKSUM_PACKET_INFO csum_info;
  ULONG mss = 0;
  uint16_t flags = NETTXF_more_data;     /* cleared on the final chunk below */
  packet_info_t pi;
  BOOLEAN ndis_lso = FALSE;              /* NDIS requested large-send offload */
  BOOLEAN xen_gso = FALSE;               /* we will emit a Xen GSO extra_info slot */
  PSCATTER_GATHER_LIST sg;
  ULONG sg_element = 0;                  /* current sg fragment */
  ULONG sg_offset = 0;                   /* offset into current fragment not yet covered by the copied header */
  ULONG parse_result;
  shared_buffer_t *header_buf = NULL;    /* pre-mapped buffer the header is coalesced into, if needed */
  ULONG chunks = 0;

  XenNet_ClearPacketInfo(&pi);
  NdisQueryPacket(packet, NULL, (PUINT)&pi.mdl_count, &pi.first_buffer, (PUINT)&pi.total_length);

  parse_result = XenNet_ParsePacketHeader(&pi);

  /* Translate the NDIS per-packet checksum-offload request into Xen
   * NETTXF_* flags. Unsupported combinations are only logged (the
   * fail-the-packet handling is intentionally commented out). */
  if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) == NDIS_PROTOCOL_ID_TCP_IP)
  {
    csum_info = (PNDIS_TCP_IP_CHECKSUM_PACKET_INFO)&NDIS_PER_PACKET_INFO_FROM_PACKET(
      packet, TcpIpChecksumPacketInfo);
    if (csum_info->Transmit.NdisPacketChecksumV4)
    {
      if (csum_info->Transmit.NdisPacketIpChecksum && !xi->setting_csum.V4Transmit.IpChecksum)
      {
        KdPrint((__DRIVER_NAME " IpChecksum not enabled\n"));
        //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
        //return TRUE;
      }
      if (csum_info->Transmit.NdisPacketTcpChecksum)
      {
        if (xi->setting_csum.V4Transmit.TcpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " TcpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
      else if (csum_info->Transmit.NdisPacketUdpChecksum)
      {
        if (xi->setting_csum.V4Transmit.UdpChecksum)
        {
          flags |= NETTXF_csum_blank | NETTXF_data_validated;
        }
        else
        {
          KdPrint((__DRIVER_NAME " UdpChecksum not enabled\n"));
          //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
          //return TRUE;
        }
      }
    }
    else if (csum_info->Transmit.NdisPacketChecksumV6)
    {
      KdPrint((__DRIVER_NAME " NdisPacketChecksumV6 not supported\n"));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      //return TRUE;
    }
  }

  /* non-zero mss means NDIS asked for large-send offload on this packet */
  mss = PtrToUlong(NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo));

  if (mss)
  {
    if (NDIS_GET_PACKET_PROTOCOL_TYPE(packet) != NDIS_PROTOCOL_ID_TCP_IP)
    {
      KdPrint((__DRIVER_NAME " mss specified when packet is not NDIS_PROTOCOL_ID_TCP_IP\n"));
    }
    ndis_lso = TRUE;
    if (mss > xi->setting_max_offload)
    {
      /* over-limit LSO request: drop silently (returning TRUE consumes the packet) */
      KdPrint((__DRIVER_NAME " Requested MSS (%d) larger than allowed MSS (%d)\n", mss, xi->setting_max_offload));
      //NDIS_SET_PACKET_STATUS(packet, NDIS_STATUS_FAILURE);
      return TRUE;
    }
  }

  sg = (PSCATTER_GATHER_LIST)NDIS_PER_PACKET_INFO_FROM_PACKET(packet, ScatterGatherListPacketInfo);
  ASSERT(sg != NULL);

  /* 19 appears to be the supported fragment limit here - TODO confirm
   * against the backend's max ring slots per packet */
  if (sg->NumberOfElements > 19)
  {
    KdPrint((__DRIVER_NAME " sg->NumberOfElements = %d\n", sg->NumberOfElements));
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
    return TRUE; // we'll pretend we sent the packet here for now...
  }
  /* need one slot per fragment plus one for the GSO extra_info when doing LSO */
  if (sg->NumberOfElements + !!ndis_lso > xi->tx_ring_free)
  {
    KdPrint((__DRIVER_NAME " Full on send - required = %d, available = %d\n", sg->NumberOfElements + !!ndis_lso, xi->tx_ring_free));
    return FALSE;
  }

  /* Coalesce the protocol headers into a single pre-granted buffer when
   * doing LSO, or when the parsed header spans past the first sg fragment. */
  if (ndis_lso || (pi.header_length && pi.header_length > sg->Elements[sg_element].Length && pi.header == pi.header_data))
  {
    header_buf = get_hb_from_freelist(xi);
    if (!header_buf)
    {
      KdPrint((__DRIVER_NAME " Full on send - no free hb's\n"));
      return FALSE;
    }
  }

  if (ndis_lso)
  {
    if (parse_result == PARSE_OK)
    {
      flags |= NETTXF_csum_blank | NETTXF_data_validated; /* these may be implied but not specified when lso is used*/
      if (pi.tcp_length >= mss)
      {
        flags |= NETTXF_extra_info;
        xen_gso = TRUE;
      }
      else
      {
        KdPrint((__DRIVER_NAME " large send specified when tcp_length < mss\n"));
      }
    }
    else
    {
      KdPrint((__DRIVER_NAME " could not parse packet - no large send offload done\n"));
    }
  }

  /*
   * See io/netif.h. Must put (A) 1st request, then (B) optional extra_info, then
   * (C) rest of requests on the ring. Only (A) has csum flags.
   */

  /* (A) */
  // if we coalesced the header then we want to put that on first, otherwise we put on the first sg element
  tx0 = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
  chunks++;
  xi->tx_ring_free--;
  tx0->id = 0xFFFF;  /* sentinel: this slot carries no shadow id (see TxBufferGC) */
  if (header_buf)
  {
    ULONG remaining = pi.header_length;
    ASSERT(pi.header_length < TX_HEADER_BUFFER_SIZE);
    memcpy(header_buf->virtual, pi.header, pi.header_length);
    /* even though we haven't reported that we are capable of it, LSO demands that we calculate the IP Header checksum */
    if (ndis_lso)
    {
      XenNet_SumIpHeader(header_buf->virtual, pi.ip4_header_length);
    }
    tx0->gref = (grant_ref_t)(header_buf->logical.QuadPart >> PAGE_SHIFT);
    tx0->offset = (USHORT)header_buf->logical.LowPart & (PAGE_SIZE - 1);
    tx0->size = (USHORT)pi.header_length;
    ASSERT(tx0->offset + tx0->size <= PAGE_SIZE);
    ASSERT(tx0->size);
    /* TODO: if the next buffer contains only a small amount of data then put it on too */
    /* advance sg_element/sg_offset past the bytes we just copied, so the
     * (C) loop below doesn't transmit the header bytes twice */
    while (remaining)
    {
      if (sg->Elements[sg_element].Length <= remaining)
      {
        remaining -= sg->Elements[sg_element].Length;
        sg_element++;
      }
      else
      {
        sg_offset = remaining;
        remaining = 0;
      }
    }
  }
  else
  {
    /* no header coalescing - the first sg fragment goes on directly */
    tx0->gref = (grant_ref_t)(sg->Elements[sg_element].Address.QuadPart >> PAGE_SHIFT);
    tx0->offset = (USHORT)sg->Elements[sg_element].Address.LowPart & (PAGE_SIZE - 1);
    tx0->size = (USHORT)sg->Elements[sg_element].Length;
    ASSERT(tx0->size);
    sg_element++;
  }
  tx0->flags = flags;
  txN = tx0;
  xi->tx.req_prod_pvt++;

  /* (B) */
  if (xen_gso)
  {
    ASSERT(flags & NETTXF_extra_info);
    ei = (struct netif_extra_info *)RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    xi->tx_ring_free--;
    ei->type = XEN_NETIF_EXTRA_TYPE_GSO;
    ei->flags = 0;
    ei->u.gso.size = (USHORT)mss;
    ei->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
    ei->u.gso.pad = 0;
    ei->u.gso.features = 0;
    xi->tx.req_prod_pvt++;
  }

  /* (C) - one request per remaining sg fragment; tx0->size accumulates the
   * total packet length as required by the netif protocol */
  while (sg_element < sg->NumberOfElements)
  {
    txN = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
    chunks++;
    xi->tx_ring_free--;
    txN->id = 0xFFFF;
    txN->gref = (grant_ref_t)(sg->Elements[sg_element].Address.QuadPart >> PAGE_SHIFT);
    ASSERT((sg->Elements[sg_element].Address.LowPart & (PAGE_SIZE - 1)) + sg_offset <= PAGE_SIZE);
    txN->offset = (USHORT)(sg->Elements[sg_element].Address.LowPart + sg_offset) & (PAGE_SIZE - 1);
    ASSERT(sg->Elements[sg_element].Length > sg_offset);
    txN->size = (USHORT)(sg->Elements[sg_element].Length - sg_offset);
    ASSERT(txN->offset + txN->size <= PAGE_SIZE);
    ASSERT(txN->size);
    tx0->size = tx0->size + txN->size;
    txN->flags = NETTXF_more_data;
    sg_offset = 0; /* only the first fragment after the header is partially consumed */
    sg_element++;
    xi->tx.req_prod_pvt++;
  }
  /* final chunk: terminate the chain and attach the shadow id that
   * TxBufferGC will use to find the packet and header buffer */
  txN->flags &= ~NETTXF_more_data;
  txN->id = get_id_from_freelist(xi);
  ASSERT(tx0->size == pi.total_length);
  ASSERT(!xi->tx_shadows[txN->id].hb);
  ASSERT(!xi->tx_shadows[txN->id].packet);
  xi->tx_shadows[txN->id].packet = packet;
  xi->tx_shadows[txN->id].hb = header_buf;

  if (ndis_lso)
  {
    /* report back to NDIS how many payload bytes were actually segmented */
    NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = UlongToPtr(pi.tcp_length);
  }

  if (chunks > 19)
  {
    KdPrint((__DRIVER_NAME " chunks = %d\n", chunks));
  }
  xi->stat_tx_ok++;

  xi->tx_outstanding++;
  return TRUE;
}
351 /* Called at DISPATCH_LEVEL with tx_lock held */
353 static VOID
354 XenNet_SendQueuedPackets(struct xennet_info *xi)
355 {
356 PLIST_ENTRY entry;
357 PNDIS_PACKET packet;
358 int notify;
360 //FUNCTION_ENTER();
362 if (xi->device_state->suspend_resume_state_pdo != SR_STATE_RUNNING)
363 return;
365 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
366 /* if empty, the above returns head*, not NULL */
367 while (entry != &xi->tx_waiting_pkt_list)
368 {
369 packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
370 //KdPrint((__DRIVER_NAME " Packet ready to send\n"));
371 if (!XenNet_HWSendPacket(xi, packet))
372 {
373 InsertHeadList(&xi->tx_waiting_pkt_list, entry);
374 break;
375 }
376 entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
377 }
379 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xi->tx, notify);
380 if (notify)
381 {
382 xi->vectors.EvtChn_Notify(xi->vectors.context, xi->event_channel);
383 }
384 //FUNCTION_EXIT();
385 }
387 //ULONG packets_outstanding = 0;
388 // Called at DISPATCH_LEVEL
VOID
XenNet_TxBufferGC(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
{
  /* Tx completion DPC: consume responses from the tx ring, recycle shadow
   * ids and header buffers, collect completed NDIS packets on a private
   * list, then complete them to NDIS outside the lock. */
  struct xennet_info *xi = context;
  RING_IDX cons, prod;
  PNDIS_PACKET head = NULL, tail = NULL;  /* singly-linked via MiniportReservedEx[0] */
  PNDIS_PACKET packet;

  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  ASSERT(xi->connected);
  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

  KeAcquireSpinLockAtDpcLevel(&xi->tx_lock);

  if (xi->shutting_down)
  {
    /* there is a chance that our Dpc had been queued just before the shutdown... */
    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
    return;
  }

  /* standard Xen ring-consumer loop: re-check rsp_prod after each pass so
   * we don't miss responses posted while we were processing */
  do {
    prod = xi->tx.sring->rsp_prod;
    KeMemoryBarrier(); /* Ensure we see responses up to 'rsp_prod'. */

    for (cons = xi->tx.rsp_cons; cons != prod; cons++)
    {
      struct netif_tx_response *txrsp;

      txrsp = RING_GET_RESPONSE(&xi->tx, cons);

      xi->tx_ring_free++;

      /* id 0xFFFF marks a non-final chunk of a multi-request packet
       * (see XenNet_HWSendPacket) - nothing to recycle for those */
      if (txrsp->status == NETIF_RSP_NULL || txrsp->id == 0xFFFF)
        continue;

      if (xi->tx_shadows[txrsp->id].hb)
      {
        put_hb_on_freelist(xi, xi->tx_shadows[txrsp->id].hb);
        xi->tx_shadows[txrsp->id].hb = NULL;
      }

      if (xi->tx_shadows[txrsp->id].packet)
      {
        /* append the packet to the local completion list */
        packet = xi->tx_shadows[txrsp->id].packet;
        *(PNDIS_PACKET *)&packet->MiniportReservedEx[0] = NULL;
        if (head)
          *(PNDIS_PACKET *)&tail->MiniportReservedEx[0] = packet;
        else
          head = packet;
        tail = packet;
        xi->tx_shadows[txrsp->id].packet = NULL;
      }
      put_id_on_freelist(xi, txrsp->id);
    }

    xi->tx.rsp_cons = prod;
    /* ask for an event only after another quarter-ring of responses */
    xi->tx.sring->rsp_event = prod + (NET_TX_RING_SIZE >> 2);
    KeMemoryBarrier();
  } while (prod != xi->tx.sring->rsp_prod);

  /* if queued packets, send them now */
  if (!xi->shutting_down)
    XenNet_SendQueuedPackets(xi);

  KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);

  /* complete the collected packets to NDIS without holding tx_lock;
   * signal tx_idle_event for XenNet_TxShutdown once the last one drains */
  while (head)
  {
    packet = (PNDIS_PACKET)head;
    head = *(PNDIS_PACKET *)&packet->MiniportReservedEx[0];
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_SUCCESS);
    xi->tx_outstanding--;
    if (!xi->tx_outstanding && xi->shutting_down)
      KeSetEvent(&xi->tx_idle_event, IO_NO_INCREMENT, FALSE);
  }

  /* suspend handshake: once every id is back on the freelist the tx side
   * is quiescent, so acknowledge the pdo's suspend request */
  if (xi->device_state->suspend_resume_state_pdo == SR_STATE_SUSPENDING
    && xi->device_state->suspend_resume_state_fdo != SR_STATE_SUSPENDING
    && xi->tx_id_free == NET_TX_RING_SIZE)
  {
    KdPrint((__DRIVER_NAME " Setting SR_STATE_SUSPENDING\n"));
    xi->device_state->suspend_resume_state_fdo = SR_STATE_SUSPENDING;
    KdPrint((__DRIVER_NAME " Notifying event channel %d\n", xi->device_state->pdo_event_channel));
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
  }
}
484 // called at <= DISPATCH_LEVEL
485 VOID DDKAPI
486 XenNet_SendPackets(
487 IN NDIS_HANDLE MiniportAdapterContext,
488 IN PPNDIS_PACKET PacketArray,
489 IN UINT NumberOfPackets
490 )
491 {
492 struct xennet_info *xi = MiniportAdapterContext;
493 PNDIS_PACKET packet;
494 UINT i;
495 PLIST_ENTRY entry;
496 KIRQL OldIrql;
498 //FUNCTION_ENTER();
500 if (xi->inactive)
501 {
502 for (i = 0; i < NumberOfPackets; i++)
503 {
504 NdisMSendComplete(xi->adapter_handle, PacketArray[i], NDIS_STATUS_FAILURE);
505 }
506 return;
507 }
509 KeAcquireSpinLock(&xi->tx_lock, &OldIrql);
511 // KdPrint((__DRIVER_NAME " --> " __FUNCTION__ " (packets = %d, free_requests = %d)\n", NumberOfPackets, free_requests(xi)));
512 for (i = 0; i < NumberOfPackets; i++)
513 {
514 packet = PacketArray[i];
515 //packets_outstanding++;
516 //KdPrint(("+packet = %p\n", packet));
517 ASSERT(packet);
518 *(ULONG *)&packet->MiniportReservedEx = 0;
519 entry = (PLIST_ENTRY)&packet->MiniportReservedEx[sizeof(PVOID)];
520 InsertTailList(&xi->tx_waiting_pkt_list, entry);
521 }
523 XenNet_SendQueuedPackets(xi);
525 KeReleaseSpinLock(&xi->tx_lock, OldIrql);
527 //FUNCTION_EXIT();
528 }
530 VOID
531 XenNet_TxResumeStart(xennet_info_t *xi)
532 {
533 UNREFERENCED_PARAMETER(xi);
535 FUNCTION_ENTER();
536 /* nothing to do here - all packets were already sent */
537 FUNCTION_EXIT();
538 }
540 VOID
541 XenNet_TxResumeEnd(xennet_info_t *xi)
542 {
543 KIRQL old_irql;
545 FUNCTION_ENTER();
547 KeAcquireSpinLock(&xi->tx_lock, &old_irql);
548 XenNet_SendQueuedPackets(xi);
549 KeReleaseSpinLock(&xi->tx_lock, old_irql);
551 FUNCTION_EXIT();
552 }
554 BOOLEAN
555 XenNet_TxInit(xennet_info_t *xi)
556 {
557 USHORT i, j;
559 KeInitializeSpinLock(&xi->tx_lock);
560 KeInitializeDpc(&xi->tx_dpc, XenNet_TxBufferGC, xi);
561 /* dpcs are only serialised to a single processor */
562 KeSetTargetProcessorDpc(&xi->tx_dpc, 0);
563 //KeSetImportanceDpc(&xi->tx_dpc, HighImportance);
564 InitializeListHead(&xi->tx_waiting_pkt_list);
566 KeInitializeEvent(&xi->tx_idle_event, SynchronizationEvent, FALSE);
567 xi->tx_outstanding = 0;
568 xi->tx_ring_free = NET_TX_RING_SIZE;
570 for (i = 0; i < TX_HEADER_BUFFERS / (PAGE_SIZE / TX_HEADER_BUFFER_SIZE); i++)
571 {
572 PVOID virtual;
573 NDIS_PHYSICAL_ADDRESS logical;
574 NdisMAllocateSharedMemory(xi->adapter_handle, PAGE_SIZE, TRUE, &virtual, &logical);
575 if (virtual == NULL)
576 continue;
577 //KdPrint((__DRIVER_NAME " Allocated SharedMemory at %p\n", virtual));
578 for (j = 0; j < PAGE_SIZE / TX_HEADER_BUFFER_SIZE; j++)
579 {
580 USHORT index = i * (PAGE_SIZE / TX_HEADER_BUFFER_SIZE) + j;
581 xi->tx_hbs[index].id = index;
582 xi->tx_hbs[index].virtual = (PUCHAR)virtual + j * TX_HEADER_BUFFER_SIZE;
583 xi->tx_hbs[index].logical.QuadPart = logical.QuadPart + j * TX_HEADER_BUFFER_SIZE;
584 put_hb_on_freelist(xi, &xi->tx_hbs[index]);
585 }
586 }
587 if (i == 0)
588 KdPrint((__DRIVER_NAME " Unable to allocate any SharedMemory buffers\n"));
590 xi->tx_id_free = 0;
591 for (i = 0; i < NET_TX_RING_SIZE; i++)
592 {
593 put_id_on_freelist(xi, i);
594 }
596 return TRUE;
597 }
599 /*
600 The ring is completely closed down now. We just need to empty anything left
601 on our freelists and harvest anything left on the rings.
602 */
BOOLEAN
XenNet_TxShutdown(xennet_info_t *xi)
{
  /* Tear down the tx path after the ring has been closed: flush the DPC,
   * fail any still-queued packets, wait for in-flight sends to complete,
   * then release the shared-memory header-buffer pages.
   * Must run at PASSIVE_LEVEL (KeFlushQueuedDpcs, KeWaitForSingleObject). */
  PLIST_ENTRY entry;
  PNDIS_PACKET packet;
  //PMDL mdl;
  //ULONG i;
  KIRQL OldIrql;
  shared_buffer_t *hb;

  FUNCTION_ENTER();

  /* make sure no tx DPC is queued or still running before tearing down */
  KeRemoveQueueDpc(&xi->tx_dpc);
  KeFlushQueuedDpcs();

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  /* Free packets in tx queue */
  entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  while (entry != &xi->tx_waiting_pkt_list)
  {
    packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
    NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
    entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
  }

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  /* wait (outside the lock) for in-flight sends; tx_idle_event is set by
   * XenNet_TxBufferGC when tx_outstanding reaches zero during shutdown */
  while (xi->tx_outstanding)
  {
    KdPrint((__DRIVER_NAME " Waiting for all packets to be sent\n"));
    KeWaitForSingleObject(&xi->tx_idle_event, Executive, KernelMode, FALSE, NULL);
  }

  KeAcquireSpinLock(&xi->tx_lock, &OldIrql);

  while((hb = get_hb_from_freelist(xi)) != NULL)
  {
    /* only free the actual buffers which were aligned on a page boundary */
    /* (the other freelist entries are sub-page slices of the same pages) */
    if ((PtrToUlong(hb->virtual) & (PAGE_SIZE - 1)) == 0)
      NdisMFreeSharedMemory(xi->adapter_handle, PAGE_SIZE, TRUE, hb->virtual, hb->logical);
  }

  KeReleaseSpinLock(&xi->tx_lock, OldIrql);

  FUNCTION_EXIT();

  return TRUE;
}