win-pvdrivers

view xennet/xennet_common.c @ 1070:05ece536b204

Fix LSO bug on FIN packets. Add RxCoalesce option (default on) to work around Cisco VPN issues
author James Harper <james.harper@bendigoit.com.au>
date Wed Nov 13 07:56:13 2013 +1100 (2013-11-13)
parents 00d29add6a2a
children a60d401aa020
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
/* Increase the header to a certain size */
/*
 * Grow the contiguous header buffer pi->header to new_header_size bytes by
 * copying data out of the packet's MDL chain, advancing pi->curr_mdl /
 * pi->curr_mdl_offset / pi->curr_pb to track how far into the chain the
 * header now extends.
 *
 * header          - destination buffer; NULL means use pi->header.
 * new_header_size - requested total header length; clamped to
 *                   pi->total_length, and a no-op if the header is already
 *                   at least this long.
 *
 * Returns TRUE on success, FALSE if an MDL could not be mapped or the
 * packet ran out of data before new_header_size bytes were gathered.
 */
BOOLEAN
XenNet_BuildHeader(packet_info_t *pi, PUCHAR header, ULONG new_header_size)
{
  ULONG bytes_remaining;

  //FUNCTION_ENTER();

  if (!header)
    header = pi->header;

  /* never claim more header than the packet actually contains */
  if (new_header_size > pi->total_length) {
    new_header_size = pi->total_length;
  }

  if (new_header_size <= pi->header_length) {
    //FUNCTION_EXIT();
    return TRUE; /* header is already at least the required size */
  }

  if (header == pi->first_mdl_virtual) {
    /* header aliases the first MDL's mapping, so "growing" it is just
       bookkeeping while we stay inside that first buffer - no copy needed */
    ASSERT(new_header_size <= PAGE_SIZE);
    /* still working in the first buffer */
    if (new_header_size <= pi->first_mdl_length) {
      /* Trivially expand header_length */
      pi->header_length = new_header_size;
      if (pi->header_length == pi->first_mdl_length) {
        /* consumed the whole first MDL - step to the next one */
#if NTDDI_VERSION < NTDDI_VISTA
        NdisGetNextBuffer(pi->curr_mdl, &pi->curr_mdl);
#else
        NdisGetNextMdl(pi->curr_mdl, &pi->curr_mdl);
#endif
        pi->curr_mdl_offset = 0;
        if (pi->curr_pb)
          pi->curr_pb = pi->curr_pb->next;
      } else {
        pi->curr_mdl_offset = (USHORT)new_header_size;
      }
    }
  } else {
    /* copying into a separate (lookahead) buffer - bound by its capacity */
    ASSERT(new_header_size <= MAX_LOOKAHEAD_LENGTH + MAX_ETH_HEADER_LENGTH);
  }

  bytes_remaining = new_header_size - pi->header_length;

  /* pull the remaining bytes out of the MDL chain, one MDL at a time */
  while (bytes_remaining && pi->curr_mdl) {
    ULONG copy_size;

    XN_ASSERT(pi->curr_mdl);
    if (MmGetMdlByteCount(pi->curr_mdl)) {
      PUCHAR src_addr;
      src_addr = MmGetSystemAddressForMdlSafe(pi->curr_mdl, NormalPagePriority);
      if (!src_addr) {
        //FUNCTION_EXIT();
        return FALSE;
      }
      copy_size = min(bytes_remaining, MmGetMdlByteCount(pi->curr_mdl) - pi->curr_mdl_offset);
      memcpy(header + pi->header_length,
        src_addr + pi->curr_mdl_offset, copy_size);
      pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + copy_size);
      pi->header_length += copy_size;
      bytes_remaining -= copy_size;
    }
    /* exhausted this MDL (also skips zero-length MDLs) - advance */
    if (pi->curr_mdl_offset == MmGetMdlByteCount(pi->curr_mdl)) {
#if NTDDI_VERSION < NTDDI_VISTA
      NdisGetNextBuffer(pi->curr_mdl, &pi->curr_mdl);
#else
      NdisGetNextMdl(pi->curr_mdl, &pi->curr_mdl);
#endif
      if (pi->curr_pb)
        pi->curr_pb = pi->curr_pb->next;
      pi->curr_mdl_offset = 0;
    }
  }
  //KdPrint((__DRIVER_NAME "     C bytes_remaining = %d, pi->curr_mdl = %p\n", bytes_remaining, pi->curr_mdl));
  if (bytes_remaining) {
    /* ran off the end of the MDL chain before gathering enough bytes */
    //KdPrint((__DRIVER_NAME "     bytes_remaining\n"));
    //FUNCTION_EXIT();
    return FALSE;
  }
  //FUNCTION_EXIT();
  return TRUE;
}
/*
 * Parse the Ethernet/IPv4/TCP-or-UDP headers of the packet described by pi,
 * filling in the pi->ip_*, pi->tcp_* and pi->is_broadcast/is_multicast
 * fields and setting pi->parse_result to PARSE_OK, PARSE_TOO_SMALL or
 * PARSE_UNKNOWN_TYPE.
 *
 * alt_buffer      - optional contiguous buffer to assemble the header into;
 *                   NULL means parse in place in the first MDL's mapping.
 * min_header_size - minimum number of header bytes to pre-assemble before
 *                   parsing begins.
 */
VOID
XenNet_ParsePacketHeader(packet_info_t *pi, PUCHAR alt_buffer, ULONG min_header_size)
{
  //FUNCTION_ENTER();

  XN_ASSERT(pi->first_mdl);

  /* map the first MDL so pi->first_mdl_virtual/first_mdl_length are valid */
#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryBufferSafe(pi->first_mdl, (PVOID)&pi->first_mdl_virtual, &pi->first_mdl_length, NormalPagePriority);
#else
  NdisQueryMdl(pi->first_mdl, (PVOID)&pi->first_mdl_virtual, &pi->first_mdl_length, NormalPagePriority);
#endif
  pi->curr_mdl = pi->first_mdl;
  if (alt_buffer)
    pi->header = alt_buffer;
  else
    pi->header = pi->first_mdl_virtual;

  /* reset all parse state before (re)parsing */
  pi->header_length = 0;
  pi->curr_mdl_offset = pi->first_mdl_offset;

  pi->ip_proto = 0;
  pi->ip_version = 0;
  pi->ip4_header_length = 0;
  pi->ip4_length = 0;
  pi->tcp_header_length = 0;
  pi->tcp_length = 0;
  pi->split_required = 0;

  /* NOTE(review): return value deliberately ignored here - failure to reach
     min_header_size is caught by the size-specific checks below */
  XenNet_BuildHeader(pi, NULL, min_header_size);

  if (!XenNet_BuildHeader(pi, NULL, (ULONG)XN_HDR_SIZE)) {
    //KdPrint((__DRIVER_NAME "     packet too small (Ethernet Header)\n"));
    pi->parse_result = PARSE_TOO_SMALL;
    return;
  }

  /* classify the destination MAC: all-FF is broadcast, low bit of the first
     octet set means group (multicast) address */
  if (pi->header[0] == 0xFF && pi->header[1] == 0xFF
      && pi->header[2] == 0xFF && pi->header[3] == 0xFF
      && pi->header[4] == 0xFF && pi->header[5] == 0xFF) {
    pi->is_broadcast = TRUE;
  } else if (pi->header[0] & 0x01) {
    pi->is_multicast = TRUE;
  }

  switch (GET_NET_PUSHORT(&pi->header[12])) { // L2 protocol field
  case 0x0800: /* IPv4 */
    //KdPrint((__DRIVER_NAME "     IP\n"));
    /* need at least the fixed 20-byte IPv4 header */
    if (pi->header_length < (ULONG)(XN_HDR_SIZE + 20)) {
      if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + 20))) {
        FUNCTION_MSG("packet too small (IP Header)\n");
        pi->parse_result = PARSE_TOO_SMALL;
        return;
      }
    }
    pi->ip_version = (pi->header[XN_HDR_SIZE + 0] & 0xF0) >> 4;
    if (pi->ip_version != 4) {
      //KdPrint((__DRIVER_NAME "     ip_version = %d\n", pi->ip_version));
      pi->parse_result = PARSE_UNKNOWN_TYPE;
      return;
    }
    /* IHL field is in 32-bit words; << 2 converts to bytes */
    pi->ip4_header_length = (pi->header[XN_HDR_SIZE + 0] & 0x0F) << 2;
    /* extend the header past any IP options plus a 20-byte L4 header */
    if (pi->header_length < (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + 20)) {
      if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + 20))) {
        //KdPrint((__DRIVER_NAME "     packet too small (IP Header + IP Options + TCP Header)\n"));
        pi->parse_result = PARSE_TOO_SMALL;
        return;
      }
    }
    break;
  case 0x86DD:  /* IPv6 */
    //KdPrint((__DRIVER_NAME "     IPv6\n"));
    //KdPrint((__DRIVER_NAME "     (not currently used)\n"));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  default:
    //KdPrint((__DRIVER_NAME "     Not IP (%04x)\n", GET_NET_PUSHORT(&pi->header[12])));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }
  pi->ip_proto = pi->header[XN_HDR_SIZE + 9];
  pi->ip4_length = GET_NET_PUSHORT(&pi->header[XN_HDR_SIZE + 2]);
  pi->ip_has_options = (BOOLEAN)(pi->ip4_header_length > 20);
  switch (pi->ip_proto) {
  case 6:  // TCP
  case 17: // UDP
    break;
  default:
    //KdPrint((__DRIVER_NAME "     Not TCP/UDP (%d)\n", pi->ip_proto));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }
  /* NOTE(review): the tcp_* fields below are also computed for UDP (proto 17);
     offset 12 of a UDP header is inside the length/checksum fields - presumably
     callers only consult tcp_* when ip_proto == 6; confirm against callers */
  /* TCP data-offset field is in 32-bit words; >> 2 converts to bytes */
  pi->tcp_header_length = (pi->header[XN_HDR_SIZE + pi->ip4_header_length + 12] & 0xf0) >> 2;

  if (pi->header_length < (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + pi->tcp_header_length)) {
    /* we don't actually need the tcp options to analyse the header */
    if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + MIN_TCP_HEADER_LENGTH))) {
      //KdPrint((__DRIVER_NAME "     packet too small (IP Header + IP Options + TCP Header (not including TCP Options))\n"));
      pi->parse_result = PARSE_TOO_SMALL;
      return;
    }
  }

  /* sanity: the IP total length must fit inside the received frame */
  if ((ULONG)XN_HDR_SIZE + pi->ip4_length > pi->total_length) {
    //KdPrint((__DRIVER_NAME "     XN_HDR_SIZE + ip4_length (%d) > total_length (%d)\n", XN_HDR_SIZE + pi->ip4_length, pi->total_length));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }

  pi->tcp_length = pi->ip4_length - pi->ip4_header_length - pi->tcp_header_length;
  pi->tcp_remaining = pi->tcp_length;
  pi->tcp_seq = GET_NET_PULONG(&pi->header[XN_HDR_SIZE + pi->ip4_header_length + 4]);
  pi->tcp_has_options = (BOOLEAN)(pi->tcp_header_length > 20);
  /* payload bigger than the MSS means LSO/GSO segmentation is needed */
  if (pi->mss > 0 && pi->tcp_length > pi->mss)
    pi->split_required = TRUE;

  //KdPrint((__DRIVER_NAME "     ip4_length = %d\n", pi->ip4_length));
  //KdPrint((__DRIVER_NAME "     tcp_length = %d\n", pi->tcp_length));
  //FUNCTION_EXIT();

  pi->parse_result = PARSE_OK;
}
230 BOOLEAN
231 XenNet_CheckIpHeaderSum(PUCHAR header, USHORT ip4_header_length) {
232 ULONG csum = 0;
233 USHORT i;
235 XN_ASSERT(ip4_header_length > 12);
236 XN_ASSERT(!(ip4_header_length & 1));
238 for (i = 0; i < ip4_header_length; i += 2) {
239 csum += GET_NET_PUSHORT(&header[XN_HDR_SIZE + i]);
240 }
241 while (csum & 0xFFFF0000)
242 csum = (csum & 0xFFFF) + (csum >> 16);
243 return (BOOLEAN)(csum == 0xFFFF);
244 }
246 VOID
247 XenNet_SumIpHeader(PUCHAR header, USHORT ip4_header_length) {
248 ULONG csum = 0;
249 USHORT i;
251 XN_ASSERT(ip4_header_length > 12);
252 XN_ASSERT(!(ip4_header_length & 1));
254 header[XN_HDR_SIZE + 10] = 0;
255 header[XN_HDR_SIZE + 11] = 0;
256 for (i = 0; i < ip4_header_length; i += 2) {
257 csum += GET_NET_PUSHORT(&header[XN_HDR_SIZE + i]);
258 }
259 while (csum & 0xFFFF0000)
260 csum = (csum & 0xFFFF) + (csum >> 16);
261 csum = ~csum;
262 SET_NET_USHORT(&header[XN_HDR_SIZE + 10], (USHORT)csum);
263 }
265 BOOLEAN
266 XenNet_FilterAcceptPacket(struct xennet_info *xi,packet_info_t *pi)
267 {
268 ULONG i;
269 BOOLEAN is_my_multicast = FALSE;
270 BOOLEAN is_directed = FALSE;
272 if (memcmp(xi->curr_mac_addr, pi->header, ETH_ALEN) == 0)
273 {
274 is_directed = TRUE;
275 }
276 else if (pi->is_multicast)
277 {
278 for (i = 0; i < xi->multicast_list_size; i++)
279 {
280 if (memcmp(xi->multicast_list[i], pi->header, 6) == 0)
281 break;
282 }
283 if (i < xi->multicast_list_size)
284 {
285 is_my_multicast = TRUE;
286 }
287 }
288 if (is_directed && (xi->packet_filter & NDIS_PACKET_TYPE_DIRECTED))
289 {
290 return TRUE;
291 }
292 if (is_my_multicast && (xi->packet_filter & NDIS_PACKET_TYPE_MULTICAST))
293 {
294 return TRUE;
295 }
296 if (pi->is_multicast && (xi->packet_filter & NDIS_PACKET_TYPE_ALL_MULTICAST))
297 {
298 return TRUE;
299 }
300 if (pi->is_broadcast && (xi->packet_filter & NDIS_PACKET_TYPE_BROADCAST))
301 {
302 return TRUE;
303 }
304 if (xi->packet_filter & NDIS_PACKET_TYPE_PROMISCUOUS)
305 {
306 return TRUE;
307 }
308 //return TRUE;
309 return FALSE;
310 }
312 static VOID
313 XenNet_RxTxDpc(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
314 {
315 struct xennet_info *xi = context;
316 BOOLEAN dont_set_event;
318 UNREFERENCED_PARAMETER(dpc);
319 UNREFERENCED_PARAMETER(arg1);
320 UNREFERENCED_PARAMETER(arg2);
322 //FUNCTION_ENTER();
323 /* if Rx goes over its per-dpc quota then make sure TxBufferGC doesn't set an event as we are already guaranteed to be called again */
324 dont_set_event = XenNet_RxBufferCheck(xi);
325 XenNet_TxBufferGC(xi, dont_set_event);
326 //FUNCTION_EXIT();
327 }
329 static BOOLEAN
330 XenNet_HandleEvent_DIRQL(PVOID context)
331 {
332 struct xennet_info *xi = context;
333 //ULONG suspend_resume_state_pdo;
335 //FUNCTION_ENTER();
336 if (xi->device_state == DEVICE_STATE_ACTIVE || xi->device_state == DEVICE_STATE_DISCONNECTING) {
337 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
338 }
339 //FUNCTION_EXIT();
340 return TRUE;
341 }
/*
 * Bring the frontend up: open the Xen device (unless resuming from suspend),
 * walk the xenbus handshake to XenbusStateConnected, allocate and grant the
 * tx/rx shared rings, bind the event channel, negotiate features with the
 * backend and parse the backend-supplied MAC address.
 *
 * suspend - TRUE when called from the resume path (device already open).
 *
 * Returns STATUS_SUCCESS or STATUS_UNSUCCESSFUL. Note: device_state is NOT
 * set to ACTIVE here - that happens later once NDIS is ready.
 */
NTSTATUS
XenNet_Connect(PVOID context, BOOLEAN suspend) {
  NTSTATUS status;
  struct xennet_info *xi = context;
  PFN_NUMBER pfn;
  ULONG qemu_hide_filter;
  ULONG qemu_hide_flags_value;
  int i;
  ULONG state;   /* MAC parser state: 0/1 = expecting hex nibble, 2 = expecting ':', 3 = done/error */
  ULONG octet;   /* MAC parser: index of octet currently being assembled */
  PCHAR tmp_string;
  ULONG tmp_ulong;
  LARGE_INTEGER timeout;

  if (!suspend) {
    xi->handle = XnOpenDevice(xi->pdo, XenNet_DeviceCallback, xi);
  }
  if (!xi->handle) {
    FUNCTION_MSG("Cannot open Xen device\n");
    return STATUS_UNSUCCESSFUL;
  }
  /* if qemu's emulated NICs were not unplugged, stay inactive so we don't
     fight the emulated device */
  XnGetValue(xi->handle, XN_VALUE_TYPE_QEMU_HIDE_FLAGS, &qemu_hide_flags_value);
  XnGetValue(xi->handle, XN_VALUE_TYPE_QEMU_FILTER, &qemu_hide_filter);
  if (!(qemu_hide_flags_value & QEMU_UNPLUG_ALL_NICS) || qemu_hide_filter) {
    FUNCTION_MSG("inactive\n");
    xi->device_state = DEVICE_STATE_INACTIVE;
    /* continue with setup so all the flags and capabilities are correct */
  }

  if (xi->device_state != DEVICE_STATE_INACTIVE) {
    /* wait (up to ~6s) for the backend to reach an InitXxx state, nudging it
       out of Closed by re-writing our own state if necessary */
    for (i = 0; i <= 5 && xi->backend_state != XenbusStateInitialising && xi->backend_state != XenbusStateInitWait && xi->backend_state != XenbusStateInitialised; i++) {
      FUNCTION_MSG("Waiting for XenbusStateInitXxx\n");
      if (xi->backend_state == XenbusStateClosed) {
        status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateInitialising);
      }
      timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
      KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
    }
    if (xi->backend_state != XenbusStateInitialising && xi->backend_state != XenbusStateInitWait && xi->backend_state != XenbusStateInitialised) {
      FUNCTION_MSG("Backend state timeout\n");
      return STATUS_UNSUCCESSFUL;
    }
    if (!NT_SUCCESS(status = XnBindEvent(xi->handle, &xi->event_channel, XenNet_HandleEvent_DIRQL, xi))) {
      FUNCTION_MSG("Cannot allocate event channel\n");
      return STATUS_UNSUCCESSFUL;
    }
    FUNCTION_MSG("event_channel = %d\n", xi->event_channel);
    /* NOTE(review): the XnWriteInt32 statuses below are assigned but never
       checked - presumably xenstore writes are treated as best-effort here */
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "event-channel", xi->event_channel);
    /* allocate, init and grant the tx shared ring */
    xi->tx_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG);
    if (!xi->tx_sring) {
      FUNCTION_MSG("Cannot allocate tx_sring\n");
      return STATUS_UNSUCCESSFUL;
    }
    SHARED_RING_INIT(xi->tx_sring);
    FRONT_RING_INIT(&xi->tx_ring, xi->tx_sring, PAGE_SIZE);
    pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xi->tx_sring).QuadPart >> PAGE_SHIFT);
    FUNCTION_MSG("tx sring pfn = %d\n", (ULONG)pfn);
    xi->tx_sring_gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENNET_POOL_TAG);
    FUNCTION_MSG("tx sring_gref = %d\n", xi->tx_sring_gref);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "tx-ring-ref", xi->tx_sring_gref);
    /* allocate, init and grant the rx shared ring */
    xi->rx_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG);
    if (!xi->rx_sring) {
      FUNCTION_MSG("Cannot allocate rx_sring\n");
      return STATUS_UNSUCCESSFUL;
    }
    SHARED_RING_INIT(xi->rx_sring);
    FRONT_RING_INIT(&xi->rx_ring, xi->rx_sring, PAGE_SIZE);
    pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xi->rx_sring).QuadPart >> PAGE_SHIFT);
    FUNCTION_MSG("rx sring pfn = %d\n", (ULONG)pfn);
    xi->rx_sring_gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENNET_POOL_TAG);
    FUNCTION_MSG("rx sring_gref = %d\n", xi->rx_sring_gref);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "rx-ring-ref", xi->rx_sring_gref);

    /* advertise our frontend features to the backend */
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "request-rx-copy", 1);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "request-rx-notify", 1);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-no-csum-offload", !xi->frontend_csum_supported);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-sg", (int)xi->frontend_sg_supported);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-gso-tcpv4", !!xi->frontend_gso_value);
  }
  /* read back what the backend supports */
  status = XnReadInt32(xi->handle, XN_BASE_BACKEND, "feature-sg", &tmp_ulong);
  if (tmp_ulong) {
    xi->backend_sg_supported = TRUE;
  }
  status = XnReadInt32(xi->handle, XN_BASE_BACKEND, "feature-gso-tcpv4", &tmp_ulong);
  if (tmp_ulong) {
    xi->backend_gso_value = xi->frontend_gso_value;
  }

  /* parse the backend's "aa:bb:cc:dd:ee:ff" MAC string into perm_mac_addr */
  status = XnReadString(xi->handle, XN_BASE_BACKEND, "mac", &tmp_string);
  state = 0;
  octet = 0;
  /* NOTE(review): nibbles are OR'd into perm_mac_addr without zeroing it
     first - assumes perm_mac_addr is zero at this point (e.g. xi allocated
     zeroed); worth confirming for the resume (suspend=TRUE) path */
  for (i = 0; state != 3 && i < (int)strlen(tmp_string); i++) {
    if (octet == 6) {
      state = 3;
      break;
    }
    switch(state) {
    case 0:
    case 1:
      /* state 0 = high nibble, state 1 = low nibble of current octet */
      if (tmp_string[i] >= '0' && tmp_string[i] <= '9') {
        xi->perm_mac_addr[octet] |= (tmp_string[i] - '0') << ((1 - state) * 4);
        state++;
      } else if (tmp_string[i] >= 'A' && tmp_string[i] <= 'F') {
        xi->perm_mac_addr[octet] |= (tmp_string[i] - 'A' + 10) << ((1 - state) * 4);
        state++;
      } else if (tmp_string[i] >= 'a' && tmp_string[i] <= 'f') {
        xi->perm_mac_addr[octet] |= (tmp_string[i] - 'a' + 10) << ((1 - state) * 4);
        state++;
      } else {
        state = 3; /* unexpected character - abort parse */
      }
      break;
    case 2:
      if (tmp_string[i] == ':') {
        octet++;
        state = 0;
      } else {
        state = 3; /* unexpected separator - abort parse */
      }
      break;
    }
  }
  /* a well-formed MAC ends having completed octet 5 (state 2, both nibbles) */
  if (octet != 5 || state != 2) {
    FUNCTION_MSG("Failed to parse backend MAC address %s\n", tmp_string);
    XnFreeMem(xi->handle, tmp_string);
    return STATUS_UNSUCCESSFUL;
  } else if ((xi->curr_mac_addr[0] & 0x03) != 0x02) {
    /* only copy if curr_mac_addr is not a LUA */
    memcpy(xi->curr_mac_addr, xi->perm_mac_addr, ETH_ALEN);
  }
  XnFreeMem(xi->handle, tmp_string);
  FUNCTION_MSG("MAC address is %02X:%02X:%02X:%02X:%02X:%02X\n",
    xi->curr_mac_addr[0], xi->curr_mac_addr[1], xi->curr_mac_addr[2],
    xi->curr_mac_addr[3], xi->curr_mac_addr[4], xi->curr_mac_addr[5]);

  if (xi->device_state != DEVICE_STATE_INACTIVE) {
    /* announce Connected and wait (up to ~6s) for the backend to follow */
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateConnected);

    for (i = 0; i <= 5 && xi->backend_state != XenbusStateConnected; i++) {
      FUNCTION_MSG("Waiting for XenbusStateConnected\n");
      timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
      KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
    }
    if (xi->backend_state != XenbusStateConnected) {
      FUNCTION_MSG("Backend state timeout\n");
      return STATUS_UNSUCCESSFUL;
    }
    XenNet_TxInit(xi);
    XenNet_RxInit(xi);
  }

  /* we don't set device_state = DEVICE_STATE_ACTIVE here - has to be done during init once ndis is ready */

  return STATUS_SUCCESS;
}
499 NTSTATUS
500 XenNet_Disconnect(PVOID context, BOOLEAN suspend) {
501 struct xennet_info *xi = (struct xennet_info *)context;
502 //PFN_NUMBER pfn;
503 LARGE_INTEGER timeout;
504 NTSTATUS status;
506 if (xi->device_state != DEVICE_STATE_ACTIVE && xi->device_state != DEVICE_STATE_INACTIVE) {
507 FUNCTION_MSG("state not DEVICE_STATE_(IN)ACTIVE, is %d instead\n", xi->device_state);
508 FUNCTION_EXIT();
509 return STATUS_SUCCESS;
510 }
511 if (xi->device_state != DEVICE_STATE_INACTIVE) {
512 xi->device_state = DEVICE_STATE_DISCONNECTING;
513 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateClosing);
514 while (xi->backend_state != XenbusStateClosing && xi->backend_state != XenbusStateClosed) {
515 FUNCTION_MSG("Waiting for XenbusStateClosing/Closed\n");
516 timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
517 KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
518 }
519 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateClosed);
520 while (xi->backend_state != XenbusStateClosed) {
521 FUNCTION_MSG("Waiting for XenbusStateClosed\n");
522 timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
523 KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
524 }
525 XnUnbindEvent(xi->handle, xi->event_channel);
527 #if NTDDI_VERSION < WINXP
528 KeFlushQueuedDpcs();
529 #endif
530 XenNet_TxShutdown(xi);
531 XenNet_RxShutdown(xi);
532 XnEndAccess(xi->handle, xi->rx_sring_gref, FALSE, XENNET_POOL_TAG);
533 ExFreePoolWithTag(xi->rx_sring, XENNET_POOL_TAG);
534 XnEndAccess(xi->handle, xi->tx_sring_gref, FALSE, XENNET_POOL_TAG);
535 ExFreePoolWithTag(xi->tx_sring, XENNET_POOL_TAG);
536 }
537 if (!suspend) {
538 XnCloseDevice(xi->handle);
539 }
540 xi->device_state = DEVICE_STATE_DISCONNECTED;
541 return STATUS_SUCCESS;
542 }
544 VOID
545 XenNet_DeviceCallback(PVOID context, ULONG callback_type, PVOID value) {
546 struct xennet_info *xi = (struct xennet_info *)context;
547 ULONG state;
548 NTSTATUS status;
550 FUNCTION_ENTER();
551 switch (callback_type) {
552 case XN_DEVICE_CALLBACK_BACKEND_STATE:
553 state = (ULONG)(ULONG_PTR)value;
554 if (state == xi->backend_state) {
555 FUNCTION_MSG("same state %d\n", state);
556 FUNCTION_EXIT();
557 }
558 FUNCTION_MSG("XenBusState = %d -> %d\n", xi->backend_state, state);
559 xi->backend_state = state;
560 KeSetEvent(&xi->backend_event, 0, FALSE);
561 break;
562 case XN_DEVICE_CALLBACK_SUSPEND:
563 FUNCTION_MSG("XN_DEVICE_CALLBACK_SUSPEND");
564 XenNet_Disconnect(xi, TRUE);
565 break;
566 case XN_DEVICE_CALLBACK_RESUME:
567 FUNCTION_MSG("XN_DEVICE_CALLBACK_RESUME");
568 xi->device_state = DEVICE_STATE_INITIALISING;
569 status = XenNet_Connect(xi, TRUE);
570 // TODO: what to do here if not success?
571 if (xi->device_state != DEVICE_STATE_INACTIVE) {
572 xi->device_state = DEVICE_STATE_ACTIVE;
573 }
574 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
575 break;
576 }
577 FUNCTION_EXIT();
578 }