win-pvdrivers

changeset 203:2a3606fece27

Created a dynamic pool of granted pages to balance memory use against performance. Still needs more work and tuning, and the pool does not yet free its pages properly, but initial testing looks good.
author James Harper <james.harper@bendigoit.com.au>
date Sat Mar 01 00:40:44 2008 +1100 (2008-03-01)
parents 71b9f608bb80
children 9e4a4650518a
files common.inc common/include/xen_windows.h xennet/xennet.c xenstub/xenstub.inx
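
Before the full diff, a trimmed and commented sketch of the page pool that xennet.c gains in this changeset. It is condensed from the hunks below (names, fields and interface calls are taken from the diff; tracing, profiling and error paths are omitted), so treat it as an illustration of the scheme rather than the committed code: each pooled page is an MDL allocated with room for a trailing grant_ref_t, granted once when it is first allocated and then reused as-is on every get/put.

static PMDL
get_page_from_freelist(struct xennet_info *xi)
{
  PMDL mdl;
  KIRQL OldIrql;

  KeAcquireSpinLock(&xi->page_lock, &OldIrql);
  if (xi->page_free == 0)
  {
    /* Pool is empty: allocate a fresh page with space for a grant_ref_t
       immediately after the MDL, and grant the page once. */
    mdl = AllocatePagesExtra(1, sizeof(grant_ref_t));
    *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE)) =
      xi->XenInterface.GntTbl_GrantAccess(
        xi->XenInterface.InterfaceHeader.Context, 0,
        *MmGetMdlPfnArray(mdl), FALSE);
  }
  else
  {
    /* Reuse a page that is already granted. */
    xi->page_free--;
    mdl = xi->page_list[xi->page_free];
  }
  KeReleaseSpinLock(&xi->page_lock, OldIrql);

  return mdl;
}

static __inline grant_ref_t
get_grant_ref(PMDL mdl)
{
  /* The grant reference lives in the extra bytes after the MDL itself. */
  return *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE));
}

static VOID
put_page_on_freelist(struct xennet_info *xi, PMDL mdl)
{
  KIRQL OldIrql;

  /* The grant is left in place, so the page goes back on the list still
     granted and can be handed out again without another grant-table call;
     nothing ever ends the grants or frees the pool, which is the "does not
     yet free its pages properly" caveat in the commit message. */
  KeAcquireSpinLock(&xi->page_lock, &OldIrql);
  xi->page_list[xi->page_free] = mdl;
  xi->page_free++;
  KeReleaseSpinLock(&xi->page_lock, OldIrql);
}

On the transmit path, XenNet_Linearize now copies each packet into a pooled page, get_grant_ref() supplies tx->gref without a per-packet GntTbl_GrantAccess call, and XenNet_TxBufferGC / XenNet_TxBufferFree return the page with put_page_on_freelist() instead of freeing the MDL and its buffer. The new ProfTime_GrantAccess / ProfTime_EndAccess counters time the remaining grant operations to measure what the pool saves.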
line diff
     1.1 --- a/common.inc	Fri Feb 29 20:11:24 2008 +1100
     1.2 +++ b/common.inc	Sat Mar 01 00:40:44 2008 +1100
     1.3 @@ -1,4 +1,4 @@
     1.4 -VERSION=0.8.4.20
     1.5 +VERSION=0.8.4.31
     1.6  TARGETPATH=..\Target\$(DDK_TARGET_OS)
     1.7  KMDF_VERSION=1
     1.8  !IF $(_NT_TOOLS_VERSION) > 0x700
     2.1 --- a/common/include/xen_windows.h	Fri Feb 29 20:11:24 2008 +1100
     2.2 +++ b/common/include/xen_windows.h	Sat Mar 01 00:40:44 2008 +1100
     2.3 @@ -94,7 +94,7 @@ FreeSplitString(char **Bits, int Count)
     2.4  #define ALLOCATE_PAGES_POOL_TAG (ULONG) 'APPT'
     2.5  
     2.6  static PMDL
     2.7 -AllocatePages(int Pages)
     2.8 +AllocatePagesExtra(int Pages, int ExtraSize)
     2.9  {
    2.10    PMDL Mdl;
    2.11    PVOID Buf;
    2.12 @@ -106,22 +106,31 @@ AllocatePages(int Pages)
    2.13      return NULL;
    2.14    }
    2.15  //  KdPrint((__DRIVER_NAME " --- AllocatePages IRQL = %d, Buf = %p\n", KeGetCurrentIrql(), Buf));
    2.16 -  Mdl = IoAllocateMdl(Buf, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    2.17 +  Mdl = ExAllocatePoolWithTag(NonPagedPool, MmSizeOfMdl(Buf, Pages * PAGE_SIZE) + ExtraSize, ALLOCATE_PAGES_POOL_TAG);
    2.18 +  //Mdl = IoAllocateMdl(Buf, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    2.19    if (Mdl == NULL)
    2.20    {
    2.21      // free the memory here
    2.22      KdPrint((__DRIVER_NAME "     AllocatePages Failed at IoAllocateMdl\n"));
    2.23      return NULL;
    2.24    }
    2.25 +  
    2.26 +  MmInitializeMdl(Mdl, Buf, Pages * PAGE_SIZE);
    2.27    MmBuildMdlForNonPagedPool(Mdl);
    2.28    
    2.29    return Mdl;
    2.30  }
    2.31  
    2.32  static PMDL
    2.33 +AllocatePages(int Pages)
    2.34 +{
    2.35 +  return AllocatePagesExtra(Pages, 0);
    2.36 +}
    2.37 +
    2.38 +static PMDL
    2.39  AllocatePage()
    2.40  {
    2.41 -  return AllocatePages(1);
    2.42 +  return AllocatePagesExtra(1, 0);
    2.43  }
    2.44  
    2.45  static VOID
    2.46 @@ -129,7 +138,8 @@ FreePages(PMDL Mdl)
    2.47  {
    2.48    PVOID Buf = MmGetMdlVirtualAddress(Mdl);
    2.49  //  KdPrint((__DRIVER_NAME " --- FreePages IRQL = %d, Buf = %p\n", KeGetCurrentIrql(), Buf));
    2.50 -  IoFreeMdl(Mdl);
    2.51 +//  IoFreeMdl(Mdl);
    2.52 +  ExFreePoolWithTag(Mdl, ALLOCATE_PAGES_POOL_TAG);
    2.53    ExFreePoolWithTag(Buf, ALLOCATE_PAGES_POOL_TAG);
    2.54  }
    2.55  
     3.1 --- a/xennet/xennet.c	Fri Feb 29 20:11:24 2008 +1100
     3.2 +++ b/xennet/xennet.c	Sat Mar 01 00:40:44 2008 +1100
     3.3 @@ -59,6 +59,8 @@ static LARGE_INTEGER ProfTime_RxBufferCh
     3.4  static LARGE_INTEGER ProfTime_Linearize;
     3.5  static LARGE_INTEGER ProfTime_SendPackets;
     3.6  static LARGE_INTEGER ProfTime_SendQueuedPackets;
     3.7 +static LARGE_INTEGER ProfTime_GrantAccess;
     3.8 +static LARGE_INTEGER ProfTime_EndAccess;
     3.9  
    3.10  static int ProfCount_TxBufferGC;
    3.11  static int ProfCount_TxBufferFree;
    3.12 @@ -69,6 +71,8 @@ static int ProfCount_RxBufferCheck;
    3.13  static int ProfCount_Linearize;
    3.14  static int ProfCount_SendPackets;
    3.15  static int ProfCount_SendQueuedPackets;
    3.16 +static int ProfCount_GrantAccess;
    3.17 +static int ProfCount_EndAccess;
    3.18  
    3.19  struct xennet_info
    3.20  {
    3.21 @@ -114,6 +118,11 @@ struct xennet_info
    3.22    struct netif_tx_sring *tx_pgs;
    3.23    struct netif_rx_sring *rx_pgs;
    3.24  
    3.25 +  
    3.26 +  PMDL page_list[NET_TX_RING_SIZE + 1 + NET_RX_RING_SIZE];
    3.27 +  ULONG page_free;
    3.28 +  KSPIN_LOCK page_lock;
    3.29 +
    3.30    /* MDLs for the above */
    3.31    PMDL tx_mdl;
    3.32    PMDL rx_mdl;
    3.33 @@ -124,9 +133,9 @@ struct xennet_info
    3.34    PNDIS_PACKET tx_pkts[NET_TX_RING_SIZE+1];
    3.35    PNDIS_BUFFER rx_buffers[NET_RX_RING_SIZE];
    3.36  
    3.37 -  grant_ref_t gref_tx_head;
    3.38 +//  grant_ref_t gref_tx_head;
    3.39    grant_ref_t grant_tx_ref[NET_TX_RING_SIZE+1];
    3.40 -  grant_ref_t gref_rx_head;
    3.41 +//  grant_ref_t gref_rx_head;
    3.42    grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
    3.43  
    3.44    /* Receive-ring batched refills. */
    3.45 @@ -199,7 +208,70 @@ get_id_from_freelist(struct xennet_info 
    3.46    return id;
    3.47  }
    3.48  
    3.49 -VOID
    3.50 +static PMDL
    3.51 +get_page_from_freelist(struct xennet_info *xi)
    3.52 +{
    3.53 +  PMDL mdl;
    3.54 +  KIRQL OldIrql;
    3.55 +
    3.56 +//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
    3.57 +
    3.58 +  KeAcquireSpinLock(&xi->page_lock, &OldIrql);
    3.59 +
    3.60 +  if (xi->page_free == 0)
    3.61 +  {
    3.62 +    mdl = AllocatePagesExtra(1, sizeof(grant_ref_t));
    3.63 +    *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE)) = xi->XenInterface.GntTbl_GrantAccess(
    3.64 +      xi->XenInterface.InterfaceHeader.Context, 0,
    3.65 +      *MmGetMdlPfnArray(mdl), FALSE);
    3.66 +//    KdPrint(("New Mdl = %p, MmGetMdlVirtualAddress = %p, MmGetSystemAddressForMdlSafe = %p\n",
    3.67 +//      mdl, MmGetMdlVirtualAddress(mdl), MmGetSystemAddressForMdlSafe(mdl, NormalPagePriority)));
    3.68 +  }
    3.69 +  else
    3.70 +  {
    3.71 +    xi->page_free--;
    3.72 +    mdl = xi->page_list[xi->page_free];
    3.73 +//    KdPrint(("Old Mdl = %p, MmGetMdlVirtualAddress = %p, MmGetSystemAddressForMdlSafe = %p\n",
    3.74 +//      mdl, MmGetMdlVirtualAddress(mdl), MmGetSystemAddressForMdlSafe(mdl, NormalPagePriority)));
    3.75 +  }
    3.76 +  KeReleaseSpinLock(&xi->page_lock, OldIrql);
    3.77 +
    3.78 +//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
    3.79 +
    3.80 +  return mdl;
    3.81 +}
    3.82 +
    3.83 +static __inline grant_ref_t
    3.84 +get_grant_ref(PMDL mdl)
    3.85 +{
    3.86 +  return *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE));
    3.87 +}
    3.88 +
    3.89 +static VOID
    3.90 +put_page_on_freelist(struct xennet_info *xi, PMDL mdl)
    3.91 +{
    3.92 +  KIRQL OldIrql;
    3.93 +
    3.94 +//  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
    3.95 +
    3.96 +//  KdPrint(("Mdl = %p\n",  mdl));
    3.97 +
    3.98 +  KeAcquireSpinLock(&xi->page_lock, &OldIrql);
    3.99 +
   3.100 +  xi->page_list[xi->page_free] = mdl;
   3.101 +  xi->page_free++;
   3.102 +
   3.103 +  KeReleaseSpinLock(&xi->page_lock, OldIrql);
   3.104 +
   3.105 +//  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
   3.106 +
   3.107 +/*
   3.108 +  xi->XenInterface.GntTbl_EndAccess(xi->XenInterface.InterfaceHeader.Context,
   3.109 +        *(grant_ref_t *)(((UCHAR *)mdl) + MmSizeOfMdl(0, PAGE_SIZE)));
   3.110 +*/
   3.111 +}
   3.112 +
   3.113 +static VOID
   3.114  XenNet_SendQueuedPackets(struct xennet_info *xi);
   3.115  
   3.116  // Called at DISPATCH_LEVEL
   3.117 @@ -209,9 +281,9 @@ XenNet_TxBufferGC(struct xennet_info *xi
   3.118    RING_IDX cons, prod;
   3.119    unsigned short id;
   3.120    PNDIS_PACKET pkt;
   3.121 -  PMDL pmdl;
   3.122 -  PVOID ptr;
   3.123    LARGE_INTEGER tsc, dummy;
   3.124 +  int moretodo;
   3.125 +//  LARGE_INTEGER gtsc;
   3.126  
   3.127    ASSERT(xi->connected);
   3.128    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
   3.129 @@ -235,16 +307,18 @@ XenNet_TxBufferGC(struct xennet_info *xi
   3.130  
   3.131        id  = txrsp->id;
   3.132        pkt = xi->tx_pkts[id];
   3.133 +/*
   3.134 +gtsc = KeQueryPerformanceCounter(&dummy);
   3.135        xi->XenInterface.GntTbl_EndAccess(xi->XenInterface.InterfaceHeader.Context,
   3.136          xi->grant_tx_ref[id]);
   3.137 +ProfTime_EndAccess.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - gtsc.QuadPart;
   3.138 +ProfCount_EndAccess++;
   3.139 +*/
   3.140        xi->grant_tx_ref[id] = GRANT_INVALID_REF;
   3.141        add_id_to_freelist(xi, id);
   3.142  
   3.143 -      /* free linearized data page */
   3.144 -      pmdl = *(PMDL *)pkt->MiniportReservedEx;
   3.145 -      ptr = MmGetMdlVirtualAddress(pmdl);
   3.146 -      IoFreeMdl(pmdl);
   3.147 -      NdisFreeMemory(ptr, 0, 0); // <= DISPATCH_LEVEL
   3.148 +      put_page_on_freelist(xi, *(PMDL *)pkt->MiniportReservedEx);
   3.149 +
   3.150        InterlockedDecrement(&xi->tx_outstanding);
   3.151        xi->stat_tx_ok++;
   3.152        NdisMSendComplete(xi->adapter_handle, pkt, NDIS_STATUS_SUCCESS);
   3.153 @@ -252,18 +326,8 @@ XenNet_TxBufferGC(struct xennet_info *xi
   3.154  
   3.155      xi->tx.rsp_cons = prod;
   3.156  
   3.157 -    /*
   3.158 -     * Set a new event, then check for race with update of tx_cons.
   3.159 -     * Note that it is essential to schedule a callback, no matter
   3.160 -     * how few buffers are pending. Even if there is space in the
   3.161 -     * transmit ring, higher layers may be blocked because too much
   3.162 -     * data is outstanding: in such cases notification from Xen is
   3.163 -     * likely to be the only kick that we'll get.
   3.164 -     */
   3.165 -    xi->tx.sring->rsp_event =
   3.166 -      prod + ((xi->tx.sring->req_prod - prod) >> 1) + 1;
   3.167 -    KeMemoryBarrier();
   3.168 -  } while ((cons == prod) && (prod != xi->tx.sring->rsp_prod));
   3.169 +    RING_FINAL_CHECK_FOR_RESPONSES(&xi->tx, moretodo);
   3.170 +  } while (moretodo);
   3.171  
   3.172    KeReleaseSpinLockFromDpcLevel(&xi->tx_lock);
   3.173  
   3.174 @@ -295,11 +359,8 @@ XenNet_TxBufferFree(struct xennet_info *
   3.175    {
   3.176      packet = CONTAINING_RECORD(entry, NDIS_PACKET, MiniportReservedEx[sizeof(PVOID)]);
   3.177  
   3.178 -    /* free linearized data page */
   3.179 -    pmdl = *(PMDL *)packet->MiniportReservedEx;
   3.180 -    ptr = MmGetMdlVirtualAddress(pmdl);
   3.181 -    IoFreeMdl(pmdl);
   3.182 -    NdisFreeMemory(ptr, 0, 0); // <= DISPATCH_LEVEL
   3.183 +    put_page_on_freelist(xi, *(PMDL *)packet->MiniportReservedEx);
   3.184 +
   3.185      NdisMSendComplete(xi->adapter_handle, packet, NDIS_STATUS_FAILURE);
   3.186      entry = RemoveHeadList(&xi->tx_waiting_pkt_list);
   3.187    }
   3.188 @@ -310,9 +371,11 @@ XenNet_TxBufferFree(struct xennet_info *
   3.189        continue;
   3.190  
   3.191      packet = xi->tx_pkts[id];
   3.192 +/*
   3.193      xi->XenInterface.GntTbl_EndAccess(xi->XenInterface.InterfaceHeader.Context,
   3.194        xi->grant_tx_ref[id]);
   3.195      xi->grant_tx_ref[id] = GRANT_INVALID_REF;
   3.196 +*/
   3.197      add_id_to_freelist(xi, id);
   3.198  
   3.199      /* free linearized data page */
   3.200 @@ -340,6 +403,7 @@ XenNet_RxBufferAlloc(struct xennet_info 
   3.201    buffer_entry_t *buffer_entry;
   3.202    NDIS_STATUS status;
   3.203    LARGE_INTEGER tsc, dummy;
   3.204 +  LARGE_INTEGER gtsc;
   3.205  
   3.206  //  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
   3.207    tsc = KeQueryPerformanceCounter(&dummy);
   3.208 @@ -375,9 +439,12 @@ XenNet_RxBufferAlloc(struct xennet_info 
   3.209      xi->rx_buffers[id] = buffer;
   3.210      req = RING_GET_REQUEST(&xi->rx, req_prod + i);
   3.211      /* an NDIS_BUFFER is just a MDL, so we can get its pfn array */
   3.212 +gtsc = KeQueryPerformanceCounter(&dummy);
   3.213      ref = xi->XenInterface.GntTbl_GrantAccess(
   3.214        xi->XenInterface.InterfaceHeader.Context, 0,
   3.215        *MmGetMdlPfnArray(buffer), FALSE);
   3.216 +ProfTime_GrantAccess.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - gtsc.QuadPart;
   3.217 +ProfCount_GrantAccess++;
   3.218      ASSERT((signed short)ref >= 0);
   3.219      xi->grant_rx_ref[id] = ref;
   3.220  
   3.221 @@ -508,6 +575,7 @@ XenNet_RxBufferCheck(struct xennet_info 
   3.222    NDIS_STATUS status;
   3.223    LARGE_INTEGER tsc, dummy;
   3.224    LARGE_INTEGER time_received;
   3.225 +  LARGE_INTEGER gtsc;
   3.226    
   3.227  //  KdPrint((__DRIVER_NAME " --> " __FUNCTION__ "\n"));
   3.228  
   3.229 @@ -545,8 +613,12 @@ XenNet_RxBufferCheck(struct xennet_info 
   3.230        xi->rx_buffers[rxrsp->id] = NULL;
   3.231        NdisAdjustBufferLength(buffer, rxrsp->status);
   3.232        NdisChainBufferAtBack(packets[packet_count], buffer);
   3.233 +gtsc = KeQueryPerformanceCounter(&dummy);
   3.234        xi->XenInterface.GntTbl_EndAccess(xi->XenInterface.InterfaceHeader.Context,
   3.235          xi->grant_rx_ref[rxrsp->id]);
   3.236 +ProfTime_EndAccess.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - gtsc.QuadPart;
   3.237 +ProfCount_EndAccess++;
   3.238 +
   3.239        xi->grant_rx_ref[rxrsp->id] = GRANT_INVALID_REF;
   3.240  
   3.241        ASSERT(!(rxrsp->flags & NETRXF_extra_info)); // not used on RX
   3.242 @@ -778,6 +850,9 @@ XenNet_Init(
   3.243  
   3.244    InitializeListHead(&xi->rx_free_buf_list);
   3.245    InitializeListHead(&xi->tx_waiting_pkt_list);
   3.246 +
   3.247 +  KeInitializeSpinLock(&xi->page_lock);
   3.248 +  xi->page_free = 0;
   3.249    
   3.250    NdisAllocatePacketPool(&status, &xi->packet_pool, XN_RX_QUEUE_LEN,
   3.251      PROTOCOL_RESERVED_SIZE_IN_PACKET);
   3.252 @@ -1492,10 +1567,9 @@ XenNet_SetInformation(
   3.253  }
   3.254  
   3.255  /* Called at DISPATCH_LEVEL with tx_lock held */
   3.256 -PMDL
   3.257 -XenNet_Linearize(PNDIS_PACKET Packet)
   3.258 +static PMDL
   3.259 +XenNet_Linearize(struct xennet_info *xi, PNDIS_PACKET Packet)
   3.260  {
   3.261 -  NDIS_STATUS status;
   3.262    PMDL pmdl;
   3.263    char *start;
   3.264    PNDIS_BUFFER buffer;
   3.265 @@ -1512,21 +1586,14 @@ XenNet_Linearize(PNDIS_PACKET Packet)
   3.266      &tot_buff_len, NormalPagePriority);
   3.267    ASSERT(tot_buff_len <= XN_MAX_PKT_SIZE);
   3.268  
   3.269 -  status = NdisAllocateMemoryWithTag(&start, PAGE_SIZE, XENNET_POOL_TAG);
   3.270 -  if (!NT_SUCCESS(status))
   3.271 +  pmdl = get_page_from_freelist(xi);
   3.272 +  if (!pmdl)
   3.273    {
   3.274 -    KdPrint(("Could not allocate memory for linearization\n"));
   3.275 +    KdPrint(("Could not allocate MDL for linearization\n"));
   3.276      return NULL;
   3.277    }
   3.278  
   3.279 -  pmdl = IoAllocateMdl(start, tot_buff_len, FALSE, FALSE, NULL);
   3.280 -  if (!pmdl)
   3.281 -  {
   3.282 -    KdPrint(("Could not allocate MDL for linearization\n"));
   3.283 -    NdisFreeMemory(start, 0, 0);
   3.284 -    return NULL;
   3.285 -  }
   3.286 -  MmBuildMdlForNonPagedPool(pmdl);
   3.287 +  start = MmGetMdlVirtualAddress(pmdl);
   3.288  
   3.289    while (buffer)
   3.290    {
   3.291 @@ -1543,7 +1610,7 @@ XenNet_Linearize(PNDIS_PACKET Packet)
   3.292    return pmdl;
   3.293  }
   3.294  
   3.295 -VOID
   3.296 +static VOID
   3.297  XenNet_SendQueuedPackets(struct xennet_info *xi)
   3.298  {
   3.299    PLIST_ENTRY entry;
   3.300 @@ -1556,6 +1623,7 @@ XenNet_SendQueuedPackets(struct xennet_i
   3.301    UINT pkt_size;
   3.302    LARGE_INTEGER tsc, dummy;
   3.303    KIRQL OldIrql2;
   3.304 +//  LARGE_INTEGER gtsc;
   3.305  
   3.306    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql2);
   3.307  
   3.308 @@ -1582,11 +1650,17 @@ XenNet_SendQueuedPackets(struct xennet_i
   3.309  
   3.310      tx = RING_GET_REQUEST(&xi->tx, xi->tx.req_prod_pvt);
   3.311      tx->id = id;
   3.312 +    tx->gref = get_grant_ref(pmdl);
   3.313 +/*
   3.314 +gtsc = KeQueryPerformanceCounter(&dummy);
   3.315      tx->gref = xi->XenInterface.GntTbl_GrantAccess(
   3.316        xi->XenInterface.InterfaceHeader.Context,
   3.317        0,
   3.318        *MmGetMdlPfnArray(pmdl),
   3.319        TRUE);
   3.320 +ProfTime_GrantAccess.QuadPart += KeQueryPerformanceCounter(&dummy).QuadPart - gtsc.QuadPart;
   3.321 +ProfCount_GrantAccess++;
   3.322 +*/
   3.323      xi->grant_tx_ref[id] = tx->gref;
   3.324      tx->offset = (uint16_t)MmGetMdlByteOffset(pmdl);
   3.325      tx->size = (UINT16)pkt_size;
   3.326 @@ -1641,7 +1715,7 @@ XenNet_SendPackets(
   3.327  
   3.328      //KdPrint(("sending pkt, len %d\n", pkt_size));
   3.329  
   3.330 -    pmdl = XenNet_Linearize(curr_packet);
   3.331 +    pmdl = XenNet_Linearize(xi, curr_packet);
   3.332      if (!pmdl)
   3.333      {
   3.334        KdPrint((__DRIVER_NAME "Couldn't linearize packet!\n"));
   3.335 @@ -1678,6 +1752,8 @@ XenNet_SendPackets(
   3.336      KdPrint((__DRIVER_NAME "     Linearize         Count = %10d, Avg Time = %10ld\n", ProfCount_Linearize, (ProfCount_Linearize == 0)?0:(ProfTime_Linearize.QuadPart / ProfCount_Linearize)));
   3.337      KdPrint((__DRIVER_NAME "     SendPackets       Count = %10d, Avg Time = %10ld\n", ProfCount_SendPackets, (ProfCount_SendPackets == 0)?0:(ProfTime_SendPackets.QuadPart / ProfCount_SendPackets)));
   3.338      KdPrint((__DRIVER_NAME "     SendQueuedPackets Count = %10d, Avg Time = %10ld\n", ProfCount_SendQueuedPackets, (ProfCount_SendQueuedPackets == 0)?0:(ProfTime_SendQueuedPackets.QuadPart / ProfCount_SendQueuedPackets)));
   3.339 +    KdPrint((__DRIVER_NAME "     GrantAccess       Count = %10d, Avg Time = %10ld\n", ProfCount_GrantAccess, (ProfCount_GrantAccess == 0)?0:(ProfTime_GrantAccess.QuadPart / ProfCount_GrantAccess)));
   3.340 +    KdPrint((__DRIVER_NAME "     EndAccess         Count = %10d, Avg Time = %10ld\n", ProfCount_EndAccess, (ProfCount_EndAccess == 0)?0:(ProfTime_EndAccess.QuadPart / ProfCount_EndAccess)));
   3.341    }
   3.342    //  KdPrint((__DRIVER_NAME " <-- " __FUNCTION__ "\n"));
   3.343  }
   3.344 @@ -1840,6 +1916,8 @@ DriverEntry(
   3.345    ProfTime_Linearize.QuadPart = 0;
   3.346    ProfTime_SendPackets.QuadPart = 0;
   3.347    ProfTime_SendQueuedPackets.QuadPart = 0;
   3.348 +  ProfTime_GrantAccess.QuadPart = 0;
   3.349 +  ProfTime_EndAccess.QuadPart = 0;
   3.350  
   3.351    ProfCount_TxBufferGC = 0;
   3.352    ProfCount_TxBufferFree = 0;
   3.353 @@ -1850,6 +1928,8 @@ DriverEntry(
   3.354    ProfCount_Linearize = 0;
   3.355    ProfCount_SendPackets = 0;
   3.356    ProfCount_SendQueuedPackets = 0;
   3.357 +  ProfCount_GrantAccess = 0;
   3.358 +  ProfCount_EndAccess = 0;
   3.359  
   3.360    RtlZeroMemory(&mini_chars, sizeof(mini_chars));
   3.361  
     4.1 --- a/xenstub/xenstub.inx	Fri Feb 29 20:11:24 2008 +1100
     4.2 +++ b/xenstub/xenstub.inx	Sat Mar 01 00:40:44 2008 +1100
     4.3 @@ -19,10 +19,12 @@ ExcludeFromSelect=*
     4.4  [XenGplPv.NTx86]
     4.5  %XenStub.DRVDESC%=XenStub_Inst, XEN\CONSOLE
     4.6  %XenStub.DRVDESC%=XenStub_Inst, XEN\VFB
     4.7 +%XenStub.DRVDESC%=XenStub_Inst, XEN\VKBD
     4.8  
     4.9  [XenGplPv.NTamd64]
    4.10  %XenStub.DRVDESC%=XenStub_Inst, XEN\CONSOLE
    4.11  %XenStub.DRVDESC%=XenStub_Inst, XEN\VFB
    4.12 +%XenStub.DRVDESC%=XenStub_Inst, XEN\VKBD
    4.13  
    4.14  [XenStub_Inst.NT]
    4.15  CopyFiles=XenStub.CopyFiles