win-pvdrivers

view xennet/xennet_common.c @ 1065:00d29add6a2a

Tidy up xennet. Remove KdPrint calls.
author James Harper <james.harper@bendigoit.com.au>
date Thu Oct 03 18:12:10 2013 +1000 (2013-10-03)
parents 5bb1f345e06a
children 05ece536b204
line source
1 /*
2 PV Net Driver for Windows Xen HVM Domains
3 Copyright (C) 2007 James Harper
4 Copyright (C) 2007 Andrew Grover <andy.grover@oracle.com>
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
21 #include "xennet.h"
/* Increase the header to a certain size */
/*
 * XenNet_BuildHeader
 *
 * Ensure that at least new_header_size bytes of the packet are available
 * contiguously in the header buffer, copying bytes out of the packet's MDL
 * chain as required.
 *
 *   pi              - per-packet parse state (MDL cursor, header buffer,
 *                     accumulated header length)
 *   header          - destination buffer; NULL means use pi->header
 *   new_header_size - desired contiguous header length in bytes (clamped to
 *                     pi->total_length)
 *
 * Returns TRUE when the header buffer now holds at least the (clamped)
 * requested size; FALSE when an MDL could not be mapped or the MDL chain ran
 * out of data first.
 *
 * Side effects: advances pi->curr_mdl / pi->curr_mdl_offset / pi->curr_pb
 * past the bytes consumed, and may repoint pi->header at pi->header_data.
 */
BOOLEAN
XenNet_BuildHeader(packet_info_t *pi, PUCHAR header, ULONG new_header_size)
{
  ULONG bytes_remaining;

  //FUNCTION_ENTER();

  if (!header)
    header = pi->header;

  /* never ask for more bytes than the packet actually contains */
  if (new_header_size > pi->total_length) {
    new_header_size = pi->total_length;
  }

  if (new_header_size <= pi->header_length) {
    //FUNCTION_EXIT();
    return TRUE; /* header is already at least the required size */
  }

  if (header == pi->first_mdl_virtual) {
    /* still working in the first buffer */
    if (new_header_size <= pi->first_mdl_length) {
      /* requested size fits inside the first MDL - no copy needed, just
       * move the cursor forward */
      pi->header_length = new_header_size;
      if (pi->header_length == pi->first_mdl_length) {
        /* consumed the whole first MDL - step to the next one */
#if NTDDI_VERSION < NTDDI_VISTA
        NdisGetNextBuffer(pi->curr_mdl, &pi->curr_mdl);
#else
        NdisGetNextMdl(pi->curr_mdl, &pi->curr_mdl);
#endif
        pi->curr_mdl_offset = 0;
        if (pi->curr_pb)
          pi->curr_pb = pi->curr_pb->next;
      } else {
        pi->curr_mdl_offset = (USHORT)new_header_size;
      }
      //FUNCTION_EXIT();
      return TRUE;
    } else {
      /* header must span MDLs - switch to the private header_data buffer,
       * seeding it with what we already have */
      memcpy(pi->header_data, header, pi->header_length);
      header = pi->header = pi->header_data;
    }
  }

  bytes_remaining = new_header_size - pi->header_length;
  // TODO: if there are only a small number of bytes left in the current buffer then increase to consume that too... it would have to be no more than the size of header+mss though

  /* walk the MDL chain, appending to the header buffer until we have enough */
  while (bytes_remaining && pi->curr_mdl) {
    ULONG copy_size;

    XN_ASSERT(pi->curr_mdl);
    if (MmGetMdlByteCount(pi->curr_mdl)) {
      PUCHAR src_addr;
      src_addr = MmGetSystemAddressForMdlSafe(pi->curr_mdl, NormalPagePriority);
      if (!src_addr) {
        /* mapping failed (low resources) - caller treats packet as bad */
        //FUNCTION_EXIT();
        return FALSE;
      }
      copy_size = min(bytes_remaining, MmGetMdlByteCount(pi->curr_mdl) - pi->curr_mdl_offset);
      memcpy(header + pi->header_length,
        src_addr + pi->curr_mdl_offset, copy_size);
      pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + copy_size);
      pi->header_length += copy_size;
      bytes_remaining -= copy_size;
    }
    if (pi->curr_mdl_offset == MmGetMdlByteCount(pi->curr_mdl)) {
      /* exhausted this MDL (also handles zero-length MDLs) - advance */
#if NTDDI_VERSION < NTDDI_VISTA
      NdisGetNextBuffer(pi->curr_mdl, &pi->curr_mdl);
#else
      NdisGetNextMdl(pi->curr_mdl, &pi->curr_mdl);
#endif
      if (pi->curr_pb)
        pi->curr_pb = pi->curr_pb->next;
      pi->curr_mdl_offset = 0;
    }
  }
  //KdPrint((__DRIVER_NAME "     C bytes_remaining = %d, pi->curr_mdl = %p\n", bytes_remaining, pi->curr_mdl));
  if (bytes_remaining) {
    /* ran out of MDLs before satisfying the request */
    //KdPrint((__DRIVER_NAME "     bytes_remaining\n"));
    //FUNCTION_EXIT();
    return FALSE;
  }
  //FUNCTION_EXIT();
  return TRUE;
}
/*
 * XenNet_ParsePacketHeader
 *
 * Reset the per-packet parse state and classify the packet
 * (Ethernet -> IPv4 -> TCP/UDP), filling in the pi->* header fields and
 * setting pi->parse_result to PARSE_OK, PARSE_TOO_SMALL or
 * PARSE_UNKNOWN_TYPE.
 *
 *   pi              - per-packet state; pi->first_mdl and pi->total_length
 *                     must already be set
 *   alt_buffer      - optional buffer to parse the header into instead of
 *                     the first MDL's mapped address
 *   min_header_size - minimum number of header bytes to make contiguous
 *                     up-front (best effort)
 */
VOID
XenNet_ParsePacketHeader(packet_info_t *pi, PUCHAR alt_buffer, ULONG min_header_size)
{
  //FUNCTION_ENTER();

  XN_ASSERT(pi->first_mdl);

#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryBufferSafe(pi->first_mdl, (PVOID)&pi->first_mdl_virtual, &pi->first_mdl_length, NormalPagePriority);
#else
  NdisQueryMdl(pi->first_mdl, (PVOID)&pi->first_mdl_virtual, &pi->first_mdl_length, NormalPagePriority);
#endif
  pi->curr_mdl = pi->first_mdl;
  if (alt_buffer)
    pi->header = alt_buffer;
  else
    pi->header = pi->first_mdl_virtual;

  pi->header_length = 0;
  pi->curr_mdl_offset = pi->first_mdl_offset;

  /* clear all derived fields before parsing */
  pi->ip_proto = 0;
  pi->ip_version = 0;
  pi->ip4_header_length = 0;
  pi->ip4_length = 0;
  pi->tcp_header_length = 0;
  pi->tcp_length = 0;
  pi->split_required = 0;

  /* best-effort pre-grow to min_header_size; the return value is ignored
     here because the mandatory sizes are enforced by the checks below */
  XenNet_BuildHeader(pi, NULL, min_header_size);

  if (!XenNet_BuildHeader(pi, NULL, (ULONG)XN_HDR_SIZE)) {
    //KdPrint((__DRIVER_NAME "     packet too small (Ethernet Header)\n"));
    pi->parse_result = PARSE_TOO_SMALL;
    return;
  }

  /* classify destination MAC: all-FF is broadcast, low bit of first octet
     set is multicast */
  if (pi->header[0] == 0xFF && pi->header[1] == 0xFF
      && pi->header[2] == 0xFF && pi->header[3] == 0xFF
      && pi->header[4] == 0xFF && pi->header[5] == 0xFF) {
    pi->is_broadcast = TRUE;
  } else if (pi->header[0] & 0x01) {
    pi->is_multicast = TRUE;
  }

  switch (GET_NET_PUSHORT(&pi->header[12])) { // L2 protocol field
  case 0x0800: /* IPv4 */
    //KdPrint((__DRIVER_NAME "     IP\n"));
    if (pi->header_length < (ULONG)(XN_HDR_SIZE + 20)) {
      /* need at least the fixed 20-byte IPv4 header */
      if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + 20))) {
        FUNCTION_MSG("packet too small (IP Header)\n");
        pi->parse_result = PARSE_TOO_SMALL;
        return;
      }
    }
    pi->ip_version = (pi->header[XN_HDR_SIZE + 0] & 0xF0) >> 4;
    if (pi->ip_version != 4) {
      //KdPrint((__DRIVER_NAME "     ip_version = %d\n", pi->ip_version));
      pi->parse_result = PARSE_UNKNOWN_TYPE;
      return;
    }
    /* IHL field is in 32-bit words; << 2 converts to bytes */
    pi->ip4_header_length = (pi->header[XN_HDR_SIZE + 0] & 0x0F) << 2;
    if (pi->header_length < (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + 20)) {
      /* also pull in IP options plus the fixed 20-byte TCP header */
      if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + 20))) {
        //KdPrint((__DRIVER_NAME "     packet too small (IP Header + IP Options + TCP Header)\n"));
        pi->parse_result = PARSE_TOO_SMALL;
        return;
      }
    }
    break;
  case 0x86DD: /* IPv6 */
    //KdPrint((__DRIVER_NAME "     IPv6\n"));
    //KdPrint((__DRIVER_NAME "     (not currently used)\n"));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  default:
    //KdPrint((__DRIVER_NAME "     Not IP (%04x)\n", GET_NET_PUSHORT(&pi->header[12])));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }
  pi->ip_proto = pi->header[XN_HDR_SIZE + 9];
  pi->ip4_length = GET_NET_PUSHORT(&pi->header[XN_HDR_SIZE + 2]);
  pi->ip_has_options = (BOOLEAN)(pi->ip4_header_length > 20);
  switch (pi->ip_proto) {
  case 6:  // TCP
  case 17: // UDP
    break;
  default:
    //KdPrint((__DRIVER_NAME "     Not TCP/UDP (%d)\n", pi->ip_proto));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }
  /* TCP data-offset field is in 32-bit words in the high nibble; >> 2
     converts (value << 4) to bytes */
  pi->tcp_header_length = (pi->header[XN_HDR_SIZE + pi->ip4_header_length + 12] & 0xf0) >> 2;

  if (pi->header_length < (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + pi->tcp_header_length)) {
    /* we don't actually need the tcp options to analyse the header */
    if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + MIN_TCP_HEADER_LENGTH))) {
      //KdPrint((__DRIVER_NAME "     packet too small (IP Header + IP Options + TCP Header (not including TCP Options))\n"));
      pi->parse_result = PARSE_TOO_SMALL;
      return;
    }
  }

  if ((ULONG)XN_HDR_SIZE + pi->ip4_length > pi->total_length) {
    /* IP total-length claims more bytes than the packet holds - reject */
    //KdPrint((__DRIVER_NAME "     XN_HDR_SIZE + ip4_length (%d) > total_length (%d)\n", XN_HDR_SIZE + pi->ip4_length, pi->total_length));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }

  pi->tcp_length = pi->ip4_length - pi->ip4_header_length - pi->tcp_header_length;
  pi->tcp_remaining = pi->tcp_length;
  pi->tcp_seq = GET_NET_PULONG(&pi->header[XN_HDR_SIZE + pi->ip4_header_length + 4]);
  pi->tcp_has_options = (BOOLEAN)(pi->tcp_header_length > 20);
  /* payload larger than MSS means large-send split is needed on tx */
  if (pi->mss > 0 && pi->tcp_length > pi->mss)
    pi->split_required = TRUE;

  //KdPrint((__DRIVER_NAME "     ip4_length = %d\n", pi->ip4_length));
  //KdPrint((__DRIVER_NAME "     tcp_length = %d\n", pi->tcp_length));
  //FUNCTION_EXIT();

  pi->parse_result = PARSE_OK;
}
232 BOOLEAN
233 XenNet_CheckIpHeaderSum(PUCHAR header, USHORT ip4_header_length) {
234 ULONG csum = 0;
235 USHORT i;
237 XN_ASSERT(ip4_header_length > 12);
238 XN_ASSERT(!(ip4_header_length & 1));
240 for (i = 0; i < ip4_header_length; i += 2) {
241 csum += GET_NET_PUSHORT(&header[XN_HDR_SIZE + i]);
242 }
243 while (csum & 0xFFFF0000)
244 csum = (csum & 0xFFFF) + (csum >> 16);
245 return (BOOLEAN)(csum == 0xFFFF);
246 }
248 VOID
249 XenNet_SumIpHeader(PUCHAR header, USHORT ip4_header_length) {
250 ULONG csum = 0;
251 USHORT i;
253 XN_ASSERT(ip4_header_length > 12);
254 XN_ASSERT(!(ip4_header_length & 1));
256 header[XN_HDR_SIZE + 10] = 0;
257 header[XN_HDR_SIZE + 11] = 0;
258 for (i = 0; i < ip4_header_length; i += 2) {
259 csum += GET_NET_PUSHORT(&header[XN_HDR_SIZE + i]);
260 }
261 while (csum & 0xFFFF0000)
262 csum = (csum & 0xFFFF) + (csum >> 16);
263 csum = ~csum;
264 SET_NET_USHORT(&header[XN_HDR_SIZE + 10], (USHORT)csum);
265 }
267 BOOLEAN
268 XenNet_FilterAcceptPacket(struct xennet_info *xi,packet_info_t *pi)
269 {
270 ULONG i;
271 BOOLEAN is_my_multicast = FALSE;
272 BOOLEAN is_directed = FALSE;
274 if (memcmp(xi->curr_mac_addr, pi->header, ETH_ALEN) == 0)
275 {
276 is_directed = TRUE;
277 }
278 else if (pi->is_multicast)
279 {
280 for (i = 0; i < xi->multicast_list_size; i++)
281 {
282 if (memcmp(xi->multicast_list[i], pi->header, 6) == 0)
283 break;
284 }
285 if (i < xi->multicast_list_size)
286 {
287 is_my_multicast = TRUE;
288 }
289 }
290 if (is_directed && (xi->packet_filter & NDIS_PACKET_TYPE_DIRECTED))
291 {
292 return TRUE;
293 }
294 if (is_my_multicast && (xi->packet_filter & NDIS_PACKET_TYPE_MULTICAST))
295 {
296 return TRUE;
297 }
298 if (pi->is_multicast && (xi->packet_filter & NDIS_PACKET_TYPE_ALL_MULTICAST))
299 {
300 return TRUE;
301 }
302 if (pi->is_broadcast && (xi->packet_filter & NDIS_PACKET_TYPE_BROADCAST))
303 {
304 return TRUE;
305 }
306 if (xi->packet_filter & NDIS_PACKET_TYPE_PROMISCUOUS)
307 {
308 return TRUE;
309 }
310 //return TRUE;
311 return FALSE;
312 }
314 static VOID
315 XenNet_RxTxDpc(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
316 {
317 struct xennet_info *xi = context;
318 BOOLEAN dont_set_event;
320 UNREFERENCED_PARAMETER(dpc);
321 UNREFERENCED_PARAMETER(arg1);
322 UNREFERENCED_PARAMETER(arg2);
324 //FUNCTION_ENTER();
325 /* if Rx goes over its per-dpc quota then make sure TxBufferGC doesn't set an event as we are already guaranteed to be called again */
326 dont_set_event = XenNet_RxBufferCheck(xi);
327 XenNet_TxBufferGC(xi, dont_set_event);
328 //FUNCTION_EXIT();
329 }
331 static BOOLEAN
332 XenNet_HandleEvent_DIRQL(PVOID context)
333 {
334 struct xennet_info *xi = context;
335 //ULONG suspend_resume_state_pdo;
337 //FUNCTION_ENTER();
338 if (xi->device_state == DEVICE_STATE_ACTIVE || xi->device_state == DEVICE_STATE_DISCONNECTING) {
339 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
340 }
341 //FUNCTION_EXIT();
342 return TRUE;
343 }
/*
 * XenNet_Connect
 *
 * Bring the frontend up: open the Xen device (unless resuming from
 * suspend), negotiate with the backend via xenbus, allocate and grant the
 * tx/rx shared rings, bind the event channel, read the backend MAC address
 * and features, and drive the xenbus state machine to Connected.
 *
 *   context - struct xennet_info *
 *   suspend - TRUE when reconnecting after a suspend/resume (device handle
 *             is kept open in that case)
 *
 * Returns STATUS_SUCCESS or STATUS_UNSUCCESSFUL. Note that device_state is
 * NOT set to ACTIVE here (see comment at the end).
 *
 * NOTE(review): the XnWriteInt32/XnReadInt32 statuses are assigned but not
 * checked; presumably failures surface via the backend state timeouts -
 * confirm. Also assumes xi->perm_mac_addr is zeroed before the |= parsing
 * below - confirm against the allocation of xi.
 */
NTSTATUS
XenNet_Connect(PVOID context, BOOLEAN suspend) {
  NTSTATUS status;
  struct xennet_info *xi = context;
  PFN_NUMBER pfn;
  ULONG qemu_hide_filter;
  ULONG qemu_hide_flags_value;
  int i;
  ULONG state;
  ULONG octet;
  PCHAR tmp_string;
  ULONG tmp_ulong;
  LARGE_INTEGER timeout;

  if (!suspend) {
    xi->handle = XnOpenDevice(xi->pdo, XenNet_DeviceCallback, xi);
  }
  if (!xi->handle) {
    FUNCTION_MSG("Cannot open Xen device\n");
    return STATUS_UNSUCCESSFUL;
  }

  /* if qemu's emulated NICs were not unplugged, this PV device must stay
     inactive to avoid a duplicate NIC */
  XnGetValue(xi->handle, XN_VALUE_TYPE_QEMU_HIDE_FLAGS, &qemu_hide_flags_value);
  XnGetValue(xi->handle, XN_VALUE_TYPE_QEMU_FILTER, &qemu_hide_filter);
  if (!(qemu_hide_flags_value & QEMU_UNPLUG_ALL_NICS) || qemu_hide_filter) {
    FUNCTION_MSG("inactive\n");
    xi->device_state = DEVICE_STATE_INACTIVE;
    /* continue with setup so all the flags and capabilities are correct */
  }

  if (xi->device_state != DEVICE_STATE_INACTIVE) {
    /* wait (up to ~6 x 1s) for the backend to reach an InitXxx state,
       kicking it out of Closed if necessary */
    for (i = 0; i <= 5 && xi->backend_state != XenbusStateInitialising && xi->backend_state != XenbusStateInitWait && xi->backend_state != XenbusStateInitialised; i++) {
      FUNCTION_MSG("Waiting for XenbusStateInitXxx\n");
      if (xi->backend_state == XenbusStateClosed) {
        status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateInitialising);
      }
      timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
      KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
    }
    if (xi->backend_state != XenbusStateInitialising && xi->backend_state != XenbusStateInitWait && xi->backend_state != XenbusStateInitialised) {
      FUNCTION_MSG("Backend state timeout\n");
      return STATUS_UNSUCCESSFUL;
    }

    /* bind the event channel and publish it to the backend */
    if (!NT_SUCCESS(status = XnBindEvent(xi->handle, &xi->event_channel, XenNet_HandleEvent_DIRQL, xi))) {
      FUNCTION_MSG("Cannot allocate event channel\n");
      return STATUS_UNSUCCESSFUL;
    }
    FUNCTION_MSG("event_channel = %d\n", xi->event_channel);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "event-channel", xi->event_channel);

    /* allocate, initialise, grant and publish the tx shared ring */
    xi->tx_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG);
    if (!xi->tx_sring) {
      FUNCTION_MSG("Cannot allocate tx_sring\n");
      return STATUS_UNSUCCESSFUL;
    }
    SHARED_RING_INIT(xi->tx_sring);
    FRONT_RING_INIT(&xi->tx_ring, xi->tx_sring, PAGE_SIZE);
    pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xi->tx_sring).QuadPart >> PAGE_SHIFT);
    FUNCTION_MSG("tx sring pfn = %d\n", (ULONG)pfn);
    xi->tx_sring_gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENNET_POOL_TAG);
    FUNCTION_MSG("tx sring_gref = %d\n", xi->tx_sring_gref);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "tx-ring-ref", xi->tx_sring_gref);

    /* allocate, initialise, grant and publish the rx shared ring */
    xi->rx_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG);
    if (!xi->rx_sring) {
      FUNCTION_MSG("Cannot allocate rx_sring\n");
      return STATUS_UNSUCCESSFUL;
    }
    SHARED_RING_INIT(xi->rx_sring);
    FRONT_RING_INIT(&xi->rx_ring, xi->rx_sring, PAGE_SIZE);
    pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xi->rx_sring).QuadPart >> PAGE_SHIFT);
    FUNCTION_MSG("rx sring pfn = %d\n", (ULONG)pfn);
    xi->rx_sring_gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENNET_POOL_TAG);
    FUNCTION_MSG("rx sring_gref = %d\n", xi->rx_sring_gref);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "rx-ring-ref", xi->rx_sring_gref);

    /* advertise frontend capabilities */
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "request-rx-copy", 1);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "request-rx-notify", 1);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-no-csum-offload", !xi->frontend_csum_supported);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-sg", (int)xi->frontend_sg_supported);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-gso-tcpv4", !!xi->frontend_gso_value);
  }

  /* read backend capabilities */
  status = XnReadInt32(xi->handle, XN_BASE_BACKEND, "feature-sg", &tmp_ulong);
  if (tmp_ulong) {
    xi->backend_sg_supported = TRUE;
  }
  status = XnReadInt32(xi->handle, XN_BASE_BACKEND, "feature-gso-tcpv4", &tmp_ulong);
  if (tmp_ulong) {
    xi->backend_gso_value = xi->frontend_gso_value;
  }

  /* parse the backend "mac" string (XX:XX:XX:XX:XX:XX, hex digits either
     case) into perm_mac_addr with a small state machine:
     state 0/1 = expecting high/low nibble, state 2 = expecting ':',
     state 3 = done or parse error */
  status = XnReadString(xi->handle, XN_BASE_BACKEND, "mac", &tmp_string);
  state = 0;
  octet = 0;
  for (i = 0; state != 3 && i < (int)strlen(tmp_string); i++) {
    if (octet == 6) {
      state = 3;
      break;
    }
    switch(state) {
    case 0:
    case 1:
      if (tmp_string[i] >= '0' && tmp_string[i] <= '9') {
        xi->perm_mac_addr[octet] |= (tmp_string[i] - '0') << ((1 - state) * 4);
        state++;
      } else if (tmp_string[i] >= 'A' && tmp_string[i] <= 'F') {
        xi->perm_mac_addr[octet] |= (tmp_string[i] - 'A' + 10) << ((1 - state) * 4);
        state++;
      } else if (tmp_string[i] >= 'a' && tmp_string[i] <= 'f') {
        xi->perm_mac_addr[octet] |= (tmp_string[i] - 'a' + 10) << ((1 - state) * 4);
        state++;
      } else {
        state = 3;
      }
      break;
    case 2:
      if (tmp_string[i] == ':') {
        octet++;
        state = 0;
      } else {
        state = 3;
      }
      break;
    }
  }
  /* a fully-parsed MAC ends on the 6th octet (index 5) in state 2 */
  if (octet != 5 || state != 2) {
    FUNCTION_MSG("Failed to parse backend MAC address %s\n", tmp_string);
    XnFreeMem(xi->handle, tmp_string);
    return STATUS_UNSUCCESSFUL;
  } else if ((xi->curr_mac_addr[0] & 0x03) != 0x02) {
    /* only copy if curr_mac_addr is not a LUA */
    memcpy(xi->curr_mac_addr, xi->perm_mac_addr, ETH_ALEN);
  }
  XnFreeMem(xi->handle, tmp_string);
  FUNCTION_MSG("MAC address is %02X:%02X:%02X:%02X:%02X:%02X\n",
    xi->curr_mac_addr[0], xi->curr_mac_addr[1], xi->curr_mac_addr[2],
    xi->curr_mac_addr[3], xi->curr_mac_addr[4], xi->curr_mac_addr[5]);

  if (xi->device_state != DEVICE_STATE_INACTIVE) {
    /* announce Connected and wait (up to ~6 x 1s) for the backend to follow */
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateConnected);

    for (i = 0; i <= 5 && xi->backend_state != XenbusStateConnected; i++) {
      FUNCTION_MSG("Waiting for XenbusStateConnected\n");
      timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
      KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
    }
    if (xi->backend_state != XenbusStateConnected) {
      FUNCTION_MSG("Backend state timeout\n");
      return STATUS_UNSUCCESSFUL;
    }
    XenNet_TxInit(xi);
    XenNet_RxInit(xi);
  }

  /* we don't set device_state = DEVICE_STATE_ACTIVE here - has to be done during init once ndis is ready */

  return STATUS_SUCCESS;
}
/*
 * XenNet_Disconnect
 *
 * Tear the frontend down: walk the backend through Closing -> Closed,
 * unbind the event channel, shut down tx/rx, revoke the ring grants and
 * free the shared rings, then (unless suspending) close the Xen device.
 *
 *   context - struct xennet_info *
 *   suspend - TRUE when disconnecting for suspend (device handle kept open)
 *
 * Returns STATUS_SUCCESS (also when called in a state where there is
 * nothing to disconnect).
 */
NTSTATUS
XenNet_Disconnect(PVOID context, BOOLEAN suspend) {
  struct xennet_info *xi = (struct xennet_info *)context;
  //PFN_NUMBER pfn;
  LARGE_INTEGER timeout;
  NTSTATUS status;

  if (xi->device_state != DEVICE_STATE_ACTIVE && xi->device_state != DEVICE_STATE_INACTIVE) {
    FUNCTION_MSG("state not DEVICE_STATE_(IN)ACTIVE, is %d instead\n", xi->device_state);
    FUNCTION_EXIT();
    return STATUS_SUCCESS;
  }
  if (xi->device_state != DEVICE_STATE_INACTIVE) {
    xi->device_state = DEVICE_STATE_DISCONNECTING;

    /* drive the xenbus state machine to Closed; each wait is bounded by a
       1 second timeout, but the loops themselves have no iteration cap */
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateClosing);
    while (xi->backend_state != XenbusStateClosing && xi->backend_state != XenbusStateClosed) {
      FUNCTION_MSG("Waiting for XenbusStateClosing/Closed\n");
      timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
      KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
    }
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateClosed);
    while (xi->backend_state != XenbusStateClosed) {
      FUNCTION_MSG("Waiting for XenbusStateClosed\n");
      timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
      KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
    }
    XnUnbindEvent(xi->handle, xi->event_channel);

  /* NOTE(review): WINXP is not a standard NTDDI_* constant; if it is not
     defined elsewhere this preprocessor expression evaluates it as 0 and
     the KeFlushQueuedDpcs() call is never compiled in. Possibly
     NTDDI_WINXP (and/or the comparison direction) was intended - confirm
     before changing. */
#if NTDDI_VERSION < WINXP
    KeFlushQueuedDpcs();
#endif

    /* release tx/rx state, then revoke grants and free the shared rings */
    XenNet_TxShutdown(xi);
    XenNet_RxShutdown(xi);
    XnEndAccess(xi->handle, xi->rx_sring_gref, FALSE, XENNET_POOL_TAG);
    ExFreePoolWithTag(xi->rx_sring, XENNET_POOL_TAG);
    XnEndAccess(xi->handle, xi->tx_sring_gref, FALSE, XENNET_POOL_TAG);
    ExFreePoolWithTag(xi->tx_sring, XENNET_POOL_TAG);
  }
  if (!suspend) {
    XnCloseDevice(xi->handle);
  }
  xi->device_state = DEVICE_STATE_DISCONNECTED;
  return STATUS_SUCCESS;
}
546 VOID
547 XenNet_DeviceCallback(PVOID context, ULONG callback_type, PVOID value) {
548 struct xennet_info *xi = (struct xennet_info *)context;
549 ULONG state;
550 NTSTATUS status;
552 FUNCTION_ENTER();
553 switch (callback_type) {
554 case XN_DEVICE_CALLBACK_BACKEND_STATE:
555 state = (ULONG)(ULONG_PTR)value;
556 if (state == xi->backend_state) {
557 FUNCTION_MSG("same state %d\n", state);
558 FUNCTION_EXIT();
559 }
560 FUNCTION_MSG("XenBusState = %d -> %d\n", xi->backend_state, state);
561 xi->backend_state = state;
562 KeSetEvent(&xi->backend_event, 0, FALSE);
563 break;
564 case XN_DEVICE_CALLBACK_SUSPEND:
565 FUNCTION_MSG("XN_DEVICE_CALLBACK_SUSPEND");
566 XenNet_Disconnect(xi, TRUE);
567 break;
568 case XN_DEVICE_CALLBACK_RESUME:
569 FUNCTION_MSG("XN_DEVICE_CALLBACK_RESUME");
570 xi->device_state = DEVICE_STATE_INITIALISING;
571 status = XenNet_Connect(xi, TRUE);
572 // TODO: what to do here if not success?
573 if (xi->device_state != DEVICE_STATE_INACTIVE) {
574 xi->device_state = DEVICE_STATE_ACTIVE;
575 }
576 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
577 break;
578 }
579 FUNCTION_EXIT();
580 }