win-pvdrivers

view xennet/xennet_common.c @ 1099:27bd2a5a4704

License change from GPL to BSD
author James Harper <james.harper@bendigoit.com.au>
date Thu Mar 13 13:38:31 2014 +1100 (2014-03-13)
parents 896402519f15
children
line source
1 /*
2 PV Drivers for Windows Xen HVM Domains
4 Copyright (c) 2014, James Harper
5 All rights reserved.
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9 * Redistributions of source code must retain the above copyright
10 notice, this list of conditions and the following disclaimer.
11 * Redistributions in binary form must reproduce the above copyright
12 notice, this list of conditions and the following disclaimer in the
13 documentation and/or other materials provided with the distribution.
14 * Neither the name of James Harper nor the
15 names of its contributors may be used to endorse or promote products
16 derived from this software without specific prior written permission.
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
19 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 DISCLAIMED. IN NO EVENT SHALL JAMES HARPER BE LIABLE FOR ANY
22 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
30 #include "xennet.h"
/* Increase the header to a certain size */
/*
 * Ensure at least new_header_size bytes of the packet are contiguous at
 * 'header' (defaults to pi->header when NULL is passed), pulling bytes out of
 * successive MDLs as required.  On success pi->header_length is at least the
 * requested size (clamped to pi->total_length) and pi->curr_mdl /
 * pi->curr_mdl_offset / pi->curr_pb have been advanced past the consumed
 * bytes.  Returns FALSE if an MDL cannot be mapped or the packet runs out of
 * data first.
 */
BOOLEAN
XenNet_BuildHeader(packet_info_t *pi, PUCHAR header, ULONG new_header_size)
{
  ULONG bytes_remaining;

  //FUNCTION_ENTER();

  if (!header)
    header = pi->header;

  /* never ask for more than the whole packet */
  if (new_header_size > pi->total_length) {
    new_header_size = pi->total_length;
  }

  if (new_header_size <= pi->header_length) {
    //FUNCTION_EXIT();
    return TRUE; /* header is already at least the required size */
  }

  if (header == pi->first_mdl_virtual) {
    /* header points directly into the first MDL's mapping, so growing within
       that MDL needs no copy - the bytes are already in place */
    XN_ASSERT(new_header_size <= PAGE_SIZE);
    /* still working in the first buffer */
    if (new_header_size <= pi->first_mdl_length) {
      /* Trivially expand header_length */
      pi->header_length = new_header_size;
      if (pi->header_length == pi->first_mdl_length) {
        /* first MDL fully consumed - step to the next one */
#if NTDDI_VERSION < NTDDI_VISTA
        NdisGetNextBuffer(pi->curr_mdl, &pi->curr_mdl);
#else
        NdisGetNextMdl(pi->curr_mdl, &pi->curr_mdl);
#endif
        pi->curr_mdl_offset = 0;
        if (pi->curr_pb)
          pi->curr_pb = pi->curr_pb->next;
      } else {
        pi->curr_mdl_offset = (USHORT)new_header_size;
      }
    }
  }

  /* copy whatever is still missing from the MDL chain into the header buffer */
  bytes_remaining = new_header_size - pi->header_length;

  while (bytes_remaining && pi->curr_mdl) {
    ULONG copy_size;

    XN_ASSERT(pi->curr_mdl);
    if (MmGetMdlByteCount(pi->curr_mdl)) {
      PUCHAR src_addr;
      src_addr = MmGetSystemAddressForMdlSafe(pi->curr_mdl, NormalPagePriority);
      if (!src_addr) {
        /* mapping failure (low resources) - caller treats packet as bad */
        //FUNCTION_EXIT();
        return FALSE;
      }
      copy_size = min(bytes_remaining, MmGetMdlByteCount(pi->curr_mdl) - pi->curr_mdl_offset);
      memcpy(header + pi->header_length,
        src_addr + pi->curr_mdl_offset, copy_size);
      pi->curr_mdl_offset = (USHORT)(pi->curr_mdl_offset + copy_size);
      pi->header_length += copy_size;
      bytes_remaining -= copy_size;
    }
    /* advance to the next MDL (and matching pb entry) once this one is drained */
    if (pi->curr_mdl_offset == MmGetMdlByteCount(pi->curr_mdl)) {
#if NTDDI_VERSION < NTDDI_VISTA
      NdisGetNextBuffer(pi->curr_mdl, &pi->curr_mdl);
#else
      NdisGetNextMdl(pi->curr_mdl, &pi->curr_mdl);
#endif
      if (pi->curr_pb)
        pi->curr_pb = pi->curr_pb->next;
      pi->curr_mdl_offset = 0;
    }
  }
  //KdPrint((__DRIVER_NAME "     C bytes_remaining = %d, pi->curr_mdl = %p\n", bytes_remaining, pi->curr_mdl));
  if (bytes_remaining) {
    /* ran off the end of the MDL chain before gathering enough bytes */
    //KdPrint((__DRIVER_NAME "     bytes_remaining\n"));
    //FUNCTION_EXIT();
    return FALSE;
  }
  //FUNCTION_EXIT();
  return TRUE;
}
/*
 * Parse the ethernet/IPv4/TCP-or-UDP headers of the packet described by pi,
 * filling in the pi->* header fields and setting pi->parse_result to
 * PARSE_OK, PARSE_TOO_SMALL or PARSE_UNKNOWN_TYPE.  The header is staged
 * into alt_buffer if supplied, otherwise read in place from the first MDL.
 * Only IPv4 over ethernet carrying TCP or UDP is fully parsed; everything
 * else is classified PARSE_UNKNOWN_TYPE.
 */
VOID
XenNet_ParsePacketHeader(packet_info_t *pi, PUCHAR alt_buffer, ULONG min_header_size)
{
  //FUNCTION_ENTER();

  XN_ASSERT(pi->first_mdl);

  /* map the first MDL and record its VA/length for XenNet_BuildHeader */
#if NTDDI_VERSION < NTDDI_VISTA
  NdisQueryBufferSafe(pi->first_mdl, (PVOID)&pi->first_mdl_virtual, &pi->first_mdl_length, NormalPagePriority);
#else
  NdisQueryMdl(pi->first_mdl, (PVOID)&pi->first_mdl_virtual, &pi->first_mdl_length, NormalPagePriority);
#endif
  pi->curr_mdl = pi->first_mdl;
  if (alt_buffer)
    pi->header = alt_buffer;
  else
    pi->header = pi->first_mdl_virtual;

  pi->header_length = 0;
  pi->curr_mdl_offset = pi->first_mdl_offset;

  /* reset all derived fields before parsing */
  pi->ip_proto = 0;
  pi->ip_version = 0;
  pi->ip4_header_length = 0;
  pi->ip4_length = 0;
  pi->tcp_header_length = 0;
  pi->tcp_length = 0;
  pi->split_required = 0;

  /* best-effort pull-up of the caller's minimum; return value deliberately
     ignored - the staged builds below re-check each layer's requirement */
  XenNet_BuildHeader(pi, NULL, min_header_size);

  if (!XenNet_BuildHeader(pi, NULL, (ULONG)XN_HDR_SIZE)) {
    //KdPrint((__DRIVER_NAME "     packet too small (Ethernet Header)\n"));
    pi->parse_result = PARSE_TOO_SMALL;
    return;
  }

  /* classify destination MAC: all-FF broadcast, else low bit = group address */
  if (pi->header[0] == 0xFF && pi->header[1] == 0xFF
      && pi->header[2] == 0xFF && pi->header[3] == 0xFF
      && pi->header[4] == 0xFF && pi->header[5] == 0xFF) {
    pi->is_broadcast = TRUE;
  } else if (pi->header[0] & 0x01) {
    pi->is_multicast = TRUE;
  }

  switch (GET_NET_PUSHORT(&pi->header[12])) { // L2 protocol field
  case 0x0800: /* IPv4 */
    //KdPrint((__DRIVER_NAME "     IP\n"));
    /* pull up the fixed 20-byte IPv4 header */
    if (pi->header_length < (ULONG)(XN_HDR_SIZE + 20)) {
      if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + 20))) {
        FUNCTION_MSG("packet too small (IP Header)\n");
        pi->parse_result = PARSE_TOO_SMALL;
        return;
      }
    }
    pi->ip_version = (pi->header[XN_HDR_SIZE + 0] & 0xF0) >> 4;
    if (pi->ip_version != 4) {
      //KdPrint((__DRIVER_NAME "     ip_version = %d\n", pi->ip_version));
      pi->parse_result = PARSE_UNKNOWN_TYPE;
      return;
    }
    /* IHL is in 32-bit words; << 2 converts to bytes */
    pi->ip4_header_length = (pi->header[XN_HDR_SIZE + 0] & 0x0F) << 2;
    /* pull up IP options plus 20 bytes of the L4 header */
    if (pi->header_length < (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + 20)) {
      if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + 20))) {
        //KdPrint((__DRIVER_NAME "     packet too small (IP Header + IP Options + TCP Header)\n"));
        pi->parse_result = PARSE_TOO_SMALL;
        return;
      }
    }
    break;
  case 0x86DD:  /* IPv6 */
    //KdPrint((__DRIVER_NAME "     IPv6\n"));
    //KdPrint((__DRIVER_NAME "     (not currently used)\n"));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  default:
    //KdPrint((__DRIVER_NAME "     Not IP (%04x)\n", GET_NET_PUSHORT(&pi->header[12])));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }
  pi->ip_proto = pi->header[XN_HDR_SIZE + 9];       /* IPv4 protocol field */
  pi->ip4_length = GET_NET_PUSHORT(&pi->header[XN_HDR_SIZE + 2]); /* total length */
  pi->ip_has_options = (BOOLEAN)(pi->ip4_header_length > 20);
  switch (pi->ip_proto) {
  case 6:  // TCP
  case 17: // UDP
    break;
  default:
    //KdPrint((__DRIVER_NAME "     Not TCP/UDP (%d)\n", pi->ip_proto));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }
  /* TCP data offset is in 32-bit words in the high nibble; >> 2 gives bytes.
     NOTE(review): for UDP (proto 17) this reads a byte of the UDP payload
     area as if it were a TCP data offset - presumably callers only use
     tcp_* fields for TCP packets; confirm */
  pi->tcp_header_length = (pi->header[XN_HDR_SIZE + pi->ip4_header_length + 12] & 0xf0) >> 2;

  if (pi->header_length < (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + pi->tcp_header_length)) {
    /* we don't actually need the tcp options to analyse the header */
    if (!XenNet_BuildHeader(pi, NULL, (ULONG)(XN_HDR_SIZE + pi->ip4_header_length + MIN_TCP_HEADER_LENGTH))) {
      //KdPrint((__DRIVER_NAME "     packet too small (IP Header + IP Options + TCP Header (not including TCP Options))\n"));
      pi->parse_result = PARSE_TOO_SMALL;
      return;
    }
  }

  /* sanity: IP total length must fit inside the received frame */
  if ((ULONG)XN_HDR_SIZE + pi->ip4_length > pi->total_length) {
    //KdPrint((__DRIVER_NAME "     XN_HDR_SIZE + ip4_length (%d) > total_length (%d)\n", XN_HDR_SIZE + pi->ip4_length, pi->total_length));
    pi->parse_result = PARSE_UNKNOWN_TYPE;
    return;
  }

  pi->tcp_length = pi->ip4_length - pi->ip4_header_length - pi->tcp_header_length;
  pi->tcp_remaining = pi->tcp_length;
  pi->tcp_seq = GET_NET_PULONG(&pi->header[XN_HDR_SIZE + pi->ip4_header_length + 4]);
  pi->tcp_has_options = (BOOLEAN)(pi->tcp_header_length > 20);
  /* payload larger than the advertised MSS must be software-segmented */
  if (pi->mss > 0 && pi->tcp_length > pi->mss)
    pi->split_required = TRUE;

  //KdPrint((__DRIVER_NAME "     ip4_length = %d\n", pi->ip4_length));
  //KdPrint((__DRIVER_NAME "     tcp_length = %d\n", pi->tcp_length));
  //FUNCTION_EXIT();

  pi->parse_result = PARSE_OK;
}
237 BOOLEAN
238 XenNet_CheckIpHeaderSum(PUCHAR header, USHORT ip4_header_length) {
239 ULONG csum = 0;
240 USHORT i;
242 XN_ASSERT(ip4_header_length > 12);
243 XN_ASSERT(!(ip4_header_length & 1));
245 for (i = 0; i < ip4_header_length; i += 2) {
246 csum += GET_NET_PUSHORT(&header[XN_HDR_SIZE + i]);
247 }
248 while (csum & 0xFFFF0000)
249 csum = (csum & 0xFFFF) + (csum >> 16);
250 return (BOOLEAN)(csum == 0xFFFF);
251 }
253 VOID
254 XenNet_SumIpHeader(PUCHAR header, USHORT ip4_header_length) {
255 ULONG csum = 0;
256 USHORT i;
258 XN_ASSERT(ip4_header_length > 12);
259 XN_ASSERT(!(ip4_header_length & 1));
261 header[XN_HDR_SIZE + 10] = 0;
262 header[XN_HDR_SIZE + 11] = 0;
263 for (i = 0; i < ip4_header_length; i += 2) {
264 csum += GET_NET_PUSHORT(&header[XN_HDR_SIZE + i]);
265 }
266 while (csum & 0xFFFF0000)
267 csum = (csum & 0xFFFF) + (csum >> 16);
268 csum = ~csum;
269 SET_NET_USHORT(&header[XN_HDR_SIZE + 10], (USHORT)csum);
270 }
272 BOOLEAN
273 XenNet_FilterAcceptPacket(struct xennet_info *xi,packet_info_t *pi)
274 {
275 ULONG i;
276 BOOLEAN is_my_multicast = FALSE;
277 BOOLEAN is_directed = FALSE;
279 if (memcmp(xi->curr_mac_addr, pi->header, ETH_ALEN) == 0)
280 {
281 is_directed = TRUE;
282 }
283 else if (pi->is_multicast)
284 {
285 for (i = 0; i < xi->multicast_list_size; i++)
286 {
287 if (memcmp(xi->multicast_list[i], pi->header, 6) == 0)
288 break;
289 }
290 if (i < xi->multicast_list_size)
291 {
292 is_my_multicast = TRUE;
293 }
294 }
295 if (is_directed && (xi->packet_filter & NDIS_PACKET_TYPE_DIRECTED))
296 {
297 return TRUE;
298 }
299 if (is_my_multicast && (xi->packet_filter & NDIS_PACKET_TYPE_MULTICAST))
300 {
301 return TRUE;
302 }
303 if (pi->is_multicast && (xi->packet_filter & NDIS_PACKET_TYPE_ALL_MULTICAST))
304 {
305 return TRUE;
306 }
307 if (pi->is_broadcast && (xi->packet_filter & NDIS_PACKET_TYPE_BROADCAST))
308 {
309 return TRUE;
310 }
311 if (xi->packet_filter & NDIS_PACKET_TYPE_PROMISCUOUS)
312 {
313 return TRUE;
314 }
315 //return TRUE;
316 return FALSE;
317 }
319 static VOID
320 XenNet_RxTxDpc(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
321 {
322 struct xennet_info *xi = context;
323 BOOLEAN dont_set_event;
325 UNREFERENCED_PARAMETER(dpc);
326 UNREFERENCED_PARAMETER(arg1);
327 UNREFERENCED_PARAMETER(arg2);
329 //FUNCTION_ENTER();
330 /* if Rx goes over its per-dpc quota then make sure TxBufferGC doesn't set an event as we are already guaranteed to be called again */
331 dont_set_event = XenNet_RxBufferCheck(xi);
332 XenNet_TxBufferGC(xi, dont_set_event);
333 //FUNCTION_EXIT();
334 }
336 static BOOLEAN
337 XenNet_HandleEvent_DIRQL(PVOID context)
338 {
339 struct xennet_info *xi = context;
340 //ULONG suspend_resume_state_pdo;
342 //FUNCTION_ENTER();
343 if (xi->device_state == DEVICE_STATE_ACTIVE || xi->device_state == DEVICE_STATE_DISCONNECTING) {
344 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
345 }
346 //FUNCTION_EXIT();
347 return TRUE;
348 }
/*
 * Bring the frontend up: open the Xen device handle (unless resuming, when
 * the existing handle is reused), walk the xenbus handshake with the backend,
 * share the tx/rx rings, publish frontend features, read backend features and
 * the backend-assigned MAC address, then wait for the backend to reach
 * Connected.  Deliberately does NOT set DEVICE_STATE_ACTIVE - that happens
 * during init once NDIS is ready.  Returns STATUS_UNSUCCESSFUL on any
 * handshake/allocation failure.
 *
 * NOTE(review): failure paths after tx_sring/rx_sring are allocated and
 * granted return without freeing them - presumably torn down elsewhere on
 * failed init; confirm against the caller.
 */
NTSTATUS
XenNet_Connect(PVOID context, BOOLEAN suspend) {
  NTSTATUS status;
  struct xennet_info *xi = context;
  PFN_NUMBER pfn;
  ULONG qemu_hide_filter;
  ULONG qemu_hide_flags_value;
  int i;
  ULONG state;
  ULONG octet;
  PCHAR tmp_string;
  ULONG tmp_ulong;
  LARGE_INTEGER timeout;

  if (!suspend) {
    xi->handle = XnOpenDevice(xi->pdo, XenNet_DeviceCallback, xi);
  }
  if (!xi->handle) {
    FUNCTION_MSG("Cannot open Xen device\n");
    return STATUS_UNSUCCESSFUL;
  }
  /* if qemu's emulated NIC has not been unplugged we must stay inactive so
     the two NICs don't fight over the same backend */
  XnGetValue(xi->handle, XN_VALUE_TYPE_QEMU_HIDE_FLAGS, &qemu_hide_flags_value);
  XnGetValue(xi->handle, XN_VALUE_TYPE_QEMU_FILTER, &qemu_hide_filter);
  if (!(qemu_hide_flags_value & QEMU_UNPLUG_ALL_NICS) || qemu_hide_filter) {
    FUNCTION_MSG("inactive\n");
    xi->device_state = DEVICE_STATE_INACTIVE;
    /* continue with setup so all the flags and capabilities are correct */
  }
  /* explicitly set the frontend state as it will still be 'closed' if we are restarting the adapter */
  status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateInitialising);
  if (xi->device_state != DEVICE_STATE_INACTIVE) {
    /* wait (up to ~6s) for the backend to reach an InitXxx state */
    for (i = 0; i <= 5 && xi->backend_state != XenbusStateInitialising && xi->backend_state != XenbusStateInitWait && xi->backend_state != XenbusStateInitialised; i++) {
      FUNCTION_MSG("Waiting for XenbusStateInitXxx\n");
      if (xi->backend_state == XenbusStateClosed) {
        /* backend saw our Closed state from before the restart - nudge again */
        status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateInitialising);
      }
      timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
      KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
    }
    if (xi->backend_state != XenbusStateInitialising && xi->backend_state != XenbusStateInitWait && xi->backend_state != XenbusStateInitialised) {
      FUNCTION_MSG("Backend state timeout\n");
      return STATUS_UNSUCCESSFUL;
    }
    /* bind the event channel and publish it */
    if (!NT_SUCCESS(status = XnBindEvent(xi->handle, &xi->event_channel, XenNet_HandleEvent_DIRQL, xi))) {
      FUNCTION_MSG("Cannot allocate event channel\n");
      return STATUS_UNSUCCESSFUL;
    }
    FUNCTION_MSG("event_channel = %d\n", xi->event_channel);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "event-channel", xi->event_channel);
    /* allocate, initialise and grant the shared tx ring */
    xi->tx_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG);
    if (!xi->tx_sring) {
      FUNCTION_MSG("Cannot allocate tx_sring\n");
      return STATUS_UNSUCCESSFUL;
    }
    SHARED_RING_INIT(xi->tx_sring);
    FRONT_RING_INIT(&xi->tx_ring, xi->tx_sring, PAGE_SIZE);
    pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xi->tx_sring).QuadPart >> PAGE_SHIFT);
    FUNCTION_MSG("tx sring pfn = %d\n", (ULONG)pfn);
    xi->tx_sring_gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENNET_POOL_TAG);
    FUNCTION_MSG("tx sring_gref = %d\n", xi->tx_sring_gref);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "tx-ring-ref", xi->tx_sring_gref);
    /* allocate, initialise and grant the shared rx ring */
    xi->rx_sring = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, XENNET_POOL_TAG);
    if (!xi->rx_sring) {
      FUNCTION_MSG("Cannot allocate rx_sring\n");
      return STATUS_UNSUCCESSFUL;
    }
    SHARED_RING_INIT(xi->rx_sring);
    FRONT_RING_INIT(&xi->rx_ring, xi->rx_sring, PAGE_SIZE);
    pfn = (PFN_NUMBER)(MmGetPhysicalAddress(xi->rx_sring).QuadPart >> PAGE_SHIFT);
    FUNCTION_MSG("rx sring pfn = %d\n", (ULONG)pfn);
    xi->rx_sring_gref = XnGrantAccess(xi->handle, (ULONG)pfn, FALSE, INVALID_GRANT_REF, XENNET_POOL_TAG);
    FUNCTION_MSG("rx sring_gref = %d\n", xi->rx_sring_gref);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "rx-ring-ref", xi->rx_sring_gref);

    /* publish frontend capabilities to xenstore */
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "request-rx-copy", 1);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "request-rx-notify", 1);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-no-csum-offload", !xi->frontend_csum_supported);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-sg", (int)xi->frontend_sg_supported);
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "feature-gso-tcpv4", !!xi->frontend_gso_value);
  }

  /* backend always supports checksum offload */
  xi->backend_csum_supported = TRUE;

  status = XnReadInt32(xi->handle, XN_BASE_BACKEND, "feature-sg", &tmp_ulong);
  if (NT_SUCCESS(status) && tmp_ulong) {
    xi->backend_sg_supported = TRUE;
  } else {
    xi->backend_sg_supported = FALSE;
  }
  status = XnReadInt32(xi->handle, XN_BASE_BACKEND, "feature-gso-tcpv4", &tmp_ulong);
  if (NT_SUCCESS(status) && tmp_ulong) {
    xi->backend_gso_value = xi->frontend_gso_value;
  } else {
    xi->backend_gso_value = FALSE;
  }

  /* parse the backend's "xx:xx:xx:xx:xx:xx" MAC string.  state machine:
     state 0/1 = expecting high/low hex nibble of the current octet,
     state 2 = octet complete (expecting ':'), state 3 = done or error.
     NOTE(review): nibbles are OR-ed into perm_mac_addr - this assumes the
     array is zeroed before each connect; on resume stale bits from the
     previous parse would persist.  TODO confirm perm_mac_addr is cleared. */
  status = XnReadString(xi->handle, XN_BASE_BACKEND, "mac", &tmp_string);
  state = 0;
  octet = 0;
  for (i = 0; state != 3 && i < (int)strlen(tmp_string); i++) {
    if (octet == 6) {
      state = 3;
      break;
    }
    switch(state) {
    case 0:
    case 1:
      if (tmp_string[i] >= '0' && tmp_string[i] <= '9') {
        xi->perm_mac_addr[octet] |= (tmp_string[i] - '0') << ((1 - state) * 4);
        state++;
      } else if (tmp_string[i] >= 'A' && tmp_string[i] <= 'F') {
        xi->perm_mac_addr[octet] |= (tmp_string[i] - 'A' + 10) << ((1 - state) * 4);
        state++;
      } else if (tmp_string[i] >= 'a' && tmp_string[i] <= 'f') {
        xi->perm_mac_addr[octet] |= (tmp_string[i] - 'a' + 10) << ((1 - state) * 4);
        state++;
      } else {
        state = 3;
      }
      break;
    case 2:
      if (tmp_string[i] == ':') {
        octet++;
        state = 0;
      } else {
        state = 3;
      }
      break;
    }
  }
  /* a valid parse ends with the 6th octet (index 5) complete (state 2) */
  if (octet != 5 || state != 2) {
    FUNCTION_MSG("Failed to parse backend MAC address %s\n", tmp_string);
    XnFreeMem(xi->handle, tmp_string);
    return STATUS_UNSUCCESSFUL;
  } else if ((xi->curr_mac_addr[0] & 0x03) != 0x02) {
    /* only copy if curr_mac_addr is not a LUA */
    memcpy(xi->curr_mac_addr, xi->perm_mac_addr, ETH_ALEN);
  }
  XnFreeMem(xi->handle, tmp_string);
  FUNCTION_MSG("MAC address is %02X:%02X:%02X:%02X:%02X:%02X\n",
    xi->curr_mac_addr[0], xi->curr_mac_addr[1], xi->curr_mac_addr[2],
    xi->curr_mac_addr[3], xi->curr_mac_addr[4], xi->curr_mac_addr[5]);

  if (xi->device_state != DEVICE_STATE_INACTIVE) {
    /* announce Connected and wait (up to ~6s) for the backend to follow */
    status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateConnected);

    for (i = 0; i <= 5 && xi->backend_state != XenbusStateConnected; i++) {
      FUNCTION_MSG("Waiting for XenbusStateConnected\n");
      timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
      KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
    }
    if (xi->backend_state != XenbusStateConnected) {
      FUNCTION_MSG("Backend state timeout\n");
      return STATUS_UNSUCCESSFUL;
    }
    XenNet_TxInit(xi);
    XenNet_RxInit(xi);
  }

  /* we don't set device_state = DEVICE_STATE_ACTIVE here - has to be done during init once ndis is ready */

  return STATUS_SUCCESS;
}
515 NTSTATUS
516 XenNet_Disconnect(PVOID context, BOOLEAN suspend) {
517 struct xennet_info *xi = (struct xennet_info *)context;
518 //PFN_NUMBER pfn;
519 LARGE_INTEGER timeout;
520 NTSTATUS status;
522 if (xi->device_state != DEVICE_STATE_ACTIVE && xi->device_state != DEVICE_STATE_INACTIVE) {
523 FUNCTION_MSG("state not DEVICE_STATE_(IN)ACTIVE, is %d instead\n", xi->device_state);
524 FUNCTION_EXIT();
525 return STATUS_SUCCESS;
526 }
527 if (xi->device_state != DEVICE_STATE_INACTIVE) {
528 xi->device_state = DEVICE_STATE_DISCONNECTING;
529 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateClosing);
530 while (xi->backend_state != XenbusStateClosing && xi->backend_state != XenbusStateClosed) {
531 FUNCTION_MSG("Waiting for XenbusStateClosing/Closed\n");
532 timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
533 KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
534 }
535 status = XnWriteInt32(xi->handle, XN_BASE_FRONTEND, "state", XenbusStateClosed);
536 while (xi->backend_state != XenbusStateClosed) {
537 FUNCTION_MSG("Waiting for XenbusStateClosed\n");
538 timeout.QuadPart = -10 * 1000 * 1000; /* 1 second */
539 KeWaitForSingleObject(&xi->backend_event, Executive, KernelMode, FALSE, &timeout);
540 }
541 XnUnbindEvent(xi->handle, xi->event_channel);
543 #if NTDDI_VERSION < WINXP
544 KeFlushQueuedDpcs();
545 #endif
546 XenNet_TxShutdown(xi);
547 XenNet_RxShutdown(xi);
548 XnEndAccess(xi->handle, xi->rx_sring_gref, FALSE, XENNET_POOL_TAG);
549 ExFreePoolWithTag(xi->rx_sring, XENNET_POOL_TAG);
550 XnEndAccess(xi->handle, xi->tx_sring_gref, FALSE, XENNET_POOL_TAG);
551 ExFreePoolWithTag(xi->tx_sring, XENNET_POOL_TAG);
552 }
553 if (!suspend) {
554 XnCloseDevice(xi->handle);
555 }
556 xi->device_state = DEVICE_STATE_DISCONNECTED;
557 return STATUS_SUCCESS;
558 }
560 VOID
561 XenNet_DeviceCallback(PVOID context, ULONG callback_type, PVOID value) {
562 struct xennet_info *xi = (struct xennet_info *)context;
563 ULONG state;
564 NTSTATUS status;
566 FUNCTION_ENTER();
567 switch (callback_type) {
568 case XN_DEVICE_CALLBACK_BACKEND_STATE:
569 state = (ULONG)(ULONG_PTR)value;
570 if (state == xi->backend_state) {
571 FUNCTION_MSG("same state %d\n", state);
572 FUNCTION_EXIT();
573 }
574 FUNCTION_MSG("XenBusState = %d -> %d\n", xi->backend_state, state);
575 xi->backend_state = state;
576 KeSetEvent(&xi->backend_event, 0, FALSE);
577 break;
578 case XN_DEVICE_CALLBACK_SUSPEND:
579 FUNCTION_MSG("XN_DEVICE_CALLBACK_SUSPEND");
580 XenNet_Disconnect(xi, TRUE);
581 break;
582 case XN_DEVICE_CALLBACK_RESUME:
583 FUNCTION_MSG("XN_DEVICE_CALLBACK_RESUME");
584 xi->device_state = DEVICE_STATE_INITIALISING;
585 status = XenNet_Connect(xi, TRUE);
586 // TODO: what to do here if not success?
587 if (xi->device_state != DEVICE_STATE_INACTIVE) {
588 xi->device_state = DEVICE_STATE_ACTIVE;
589 }
590 KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
591 break;
592 }
593 FUNCTION_EXIT();
594 }