ia64/xen-unstable

xen/arch/ia64/vmx/vtlb.c @ 16112:52d9f5028397

[IA64] Fix TLB insertion for subpaging

Without this patch, Longhorn is sure to hang, and .NET applications may
also hit this bug: the itc.i instruction is repeated forever, because a
TLB entry with a smaller page size is volatile.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author Alex Williamson <alex.williamson@hp.com>
date Fri Oct 12 14:49:37 2007 -0600 (2007-10-12)
parents 788c39a0b905
children da8e527d20bd
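
Note: the sub-page handling can be seen in thash_vhpt_insert() and
thash_purge_and_insert() below (presumably the paths this fix touches): a
mapping whose page size is smaller than the region's preferred page size is
not cached in the long-format VHPT but installed directly in the machine
TLB. A minimal sketch of that decision, reusing the names from this file:

    /* Sketch only -- mirrors the page-size check in thash_vhpt_insert(). */
    if (itir_ps(itir) >= mrr.ps) {
        /* Mapping covers the region's preferred page size: cache it in the VHPT. */
        vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
    } else {
        /* Sub-page mapping: insert it straight into the machine TLB via itc. */
        phy_pte &= ~PAGE_FLAGS_RV_MASK;
        psr = ia64_clear_ic();
        ia64_itc(type + 1, va, phy_pte, itir);
        ia64_set_psr(psr);
        ia64_srlz_i();
    }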

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  XiaoYan Feng (Fleming Feng) (Fleming.feng@intel.com)
 */

#include <asm/vmx_vcpu.h>

static thash_data_t *__alloc_chain(thash_cb_t *);
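
/*
 * Collision-chain entries are carved from a per-table buffer (cch_buf) by a
 * simple bump allocator and recycled through a free list (cch_freelist).
 */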
static inline void cch_mem_init(thash_cb_t *hcb)
{
    hcb->cch_free_idx = 0;
    hcb->cch_freelist = NULL;
}

static thash_data_t *cch_alloc(thash_cb_t *hcb)
{
    thash_data_t *p;
    if ( (p = hcb->cch_freelist) != NULL ) {
        hcb->cch_freelist = p->next;
        return p;
    }
    if (hcb->cch_free_idx < hcb->cch_sz/sizeof(thash_data_t)) {
        p = &((thash_data_t *)hcb->cch_buf)[hcb->cch_free_idx++];
        p->page_flags = 0;
        p->itir = 0;
        p->next = NULL;
        return p;
    }
    return NULL;
}

/*
 * Check to see if the address rid:va is translated by the TLB
 */

static inline int __is_tr_translated(thash_data_t *trp, u64 rid, u64 va)
{
    return (trp->p) && (trp->rid == rid) && ((va-trp->vadr) < PSIZE(trp->ps));
}

/*
 * Only for GUEST TR format.
 */
static int
__is_tr_overlap(thash_data_t *trp, u64 rid, u64 sva, u64 eva)
{
    uint64_t sa1, ea1;

    if (!trp->p || trp->rid != rid ) {
        return 0;
    }
    sa1 = trp->vadr;
    ea1 = sa1 + PSIZE(trp->ps) - 1;
    eva -= 1;
    if (sva > ea1 || sa1 > eva)
        return 0;
    else
        return 1;

}
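
/*
 * Look up the guest translation registers (DTRs or ITRs) for an entry
 * covering rid:va; returns NULL if no TR matches.
 */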
static thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
{

    thash_data_t *trp;
    int i;
    u64 rid;
    vcpu_get_rr(vcpu, va, &rid);
    rid &= RR_RID_MASK;
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            trp = (thash_data_t *)vcpu->arch.dtrs;
            for (i = 0; i < NDTRS; i++, trp++) {
                if (__is_tr_translated(trp, rid, va)) {
                    return trp;
                }
            }
        }
    }
    else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            trp = (thash_data_t *)vcpu->arch.itrs;
            for (i = 0; i < NITRS; i++, trp++) {
                if (__is_tr_translated(trp, rid, va)) {
                    return trp;
                }
            }
        }
    }
    return NULL;
}
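
/*
 * Detach the full collision chain hanging off 'hash' (MAX_CCN_DEPTH entries)
 * and return it to the free list; the hash head itself is kept, with an
 * empty chain.
 */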
static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
{
    thash_data_t *p, *q;
    int i = 0;

    p = hash;
    for (i = 0; i < MAX_CCN_DEPTH; i++) {
        p = p->next;
    }
    q = hash->next;
    hash->len = 0;
    hash->next = 0;
    p->next = hcb->cch_freelist;
    hcb->cch_freelist = q;
}
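
/*
 * Insert a translation into the machine VHPT.  The new entry always ends up
 * in the hash head; the previous head contents are first copied into a
 * (possibly newly allocated) collision-chain entry.
 */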
static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
{
    u64 tag;
    ia64_rr rr;
    thash_data_t *head, *cch;

    pte = pte & ~PAGE_FLAGS_RV_MASK;
    rr.rrval = ia64_get_rr(ifa);
    head = (thash_data_t *)ia64_thash(ifa);
    tag = ia64_ttag(ifa);

    /* Find a free (ie invalid) entry. */
    cch = head;
    while (cch) {
        if (INVALID_VHPT(cch))
            break;
        cch = cch->next;
    }
    if (cch) {
        /* As we insert in head, copy head. */
        if (cch != head) {
            local_irq_disable();
            cch->page_flags = head->page_flags;
            cch->itir = head->itir;
            cch->etag = head->etag;
            head->ti = 1;
            local_irq_enable();
        }
    } else {
        if (head->len >= MAX_CCN_DEPTH) {
            thash_recycle_cch(hcb, head);
            cch = cch_alloc(hcb);
        } else {
            cch = __alloc_chain(hcb);
        }
        local_irq_disable();
        *cch = *head;
        head->ti = 1;
        head->next = cch;
        head->len = cch->len + 1;
        cch->len = 0;
        local_irq_enable();
    }
    // here head is invalid
    wmb();
    head->page_flags = pte;
    head->itir = rr.ps << 2;
    *(volatile unsigned long*)&head->etag = tag;
    return;
}
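
/*
 * Insert a guest translation into the machine VHPT when its page size is at
 * least the region's preferred page size; smaller (sub-page) mappings are
 * instead installed directly in the machine TLB with itc (see the changeset
 * description above).
 */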
void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va, int type)
{
    u64 phy_pte, psr;
    ia64_rr mrr;

    mrr.rrval = ia64_get_rr(va);
    phy_pte = translate_phy_pte(v, &pte, itir, va);

    if (itir_ps(itir) >= mrr.ps) {
        vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
    } else {
        phy_pte &= ~PAGE_FLAGS_RV_MASK;
        psr = ia64_clear_ic();
        ia64_itc(type + 1, va, phy_pte, itir);
        ia64_set_psr(psr);
        ia64_srlz_i();
    }
}
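
/*
 * If a guest TR covering ifa grants different access rights / privilege
 * level than the cached machine VHPT entry, rewrite the VHPT entry's AR/PL
 * bits and purge the machine TLB.  Returns 1 if a fixup was performed.
 */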
int vhpt_access_rights_fixup(VCPU *v, u64 ifa, int is_data)
{
    thash_data_t *trp, *data;
    u64 ps, tag, mask;

    trp = __vtr_lookup(v, ifa, is_data);
    if (trp) {
        ps = _REGION_PAGE_SIZE(ia64_get_rr(ifa));
        if (trp->ps < ps)
            return 0;
        ifa = PAGEALIGN(ifa, ps);
        data = (thash_data_t *)ia64_thash(ifa);
        tag = ia64_ttag(ifa);
        do {
            if (data->etag == tag) {
                mask = trp->page_flags & PAGE_FLAGS_AR_PL_MASK;
                if (mask != (data->page_flags & PAGE_FLAGS_AR_PL_MASK)) {
                    data->page_flags &= ~PAGE_FLAGS_AR_PL_MASK;
                    data->page_flags |= mask;
                    machine_tlb_purge(ifa, ps);
                    return 1;
                }
                return 0;
            }
            data = data->next;
        } while(data);
    }
    return 0;
}

/*
 * vhpt lookup
 */

thash_data_t * vhpt_lookup(u64 va)
{
    thash_data_t *hash, *head;
    u64 tag, pte, itir;

    head = (thash_data_t *)ia64_thash(va);
    hash = head;
    tag = ia64_ttag(va);
    do {
        if (hash->etag == tag)
            break;
        hash = hash->next;
    } while(hash);
    if (hash && hash != head) {
        /* Put the entry on the front of the list (ie swap hash and head). */
        pte = hash->page_flags;
        hash->page_flags = head->page_flags;
        head->page_flags = pte;

        tag = hash->etag;
        hash->etag = head->etag;
        head->etag = tag;

        itir = hash->itir;
        hash->itir = head->itir;
        head->itir = itir;

        head->len = hash->len;
        hash->len = 0;
        return head;
    }
    return hash;
}
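
/*
 * Look up iha in the machine VHPT (inserting from the guest DTRs on a miss),
 * then read the guest VHPT entry at iha with a speculative load while psr.ic
 * and psr.i are off.  Returns 1 if the load would fault (NaT set), otherwise
 * stores the entry (low 53 bits) through *pte and returns 0.
 */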
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
    u64 ret;
    thash_data_t * data;

    data = vhpt_lookup(iha);
    if (data == NULL) {
        data = __vtr_lookup(current, iha, DSIDE_TLB);
        if (data != NULL)
            thash_vhpt_insert(current, data->page_flags, data->itir,
                              iha, DSIDE_TLB);
    }

    asm volatile ("rsm psr.ic|psr.i;;"
                  "srlz.d;;"
                  "ld8.s r9=[%1];;"
                  "tnat.nz p6,p7=r9;;"
                  "(p6) mov %0=1;"
                  "(p6) mov r9=r0;"
                  "(p7) extr.u r9=r9,0,53;;"
                  "(p7) mov %0=r0;"
                  "(p7) st8 [%2]=r9;;"
                  "ssm psr.ic;;"
                  "srlz.d;;"
                  "ssm psr.i;;"
                  : "=r"(ret) : "r"(iha), "r"(pte) : "memory");
    return ret;
}
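
/*
 * Compute the hash bucket address and tag used by the software VTLB for
 * rid:va, given the guest region register value and guest PTA.
 */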
static thash_data_t * vtlb_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
{
    u64 index, pfn, rid;

    pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
    rid = _REGION_ID(vrr);
    index = (pfn ^ rid) & ((1UL << (vpta.size - 5)) - 1);
    *tag = pfn ^ (rid << 39);
    return (thash_data_t *)((vpta.base << PTA_BASE_SHIFT) + (index << 5));
}

/*
 * purge software guest tlb
 */

static void vtlb_purge(VCPU *v, u64 va, u64 ps)
{
    thash_data_t *cur;
    u64 start, curadr, size, psbits, tag, rr_ps, num;
    ia64_rr vrr;
    thash_cb_t *hcb = &v->arch.vtlb;

    vcpu_get_rr(v, va, &vrr.rrval);
    psbits = VMX(v, psbits[(va >> 61)]);
    start = va & ~((1UL << ps) - 1);
    while (psbits) {
        curadr = start;
        rr_ps = __ffs(psbits);
        psbits &= ~(1UL << rr_ps);
        num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
        size = PSIZE(rr_ps);
        vrr.ps = rr_ps;
        while (num) {
            cur = vtlb_thash(hcb->pta, curadr, vrr.rrval, &tag);
            while (cur) {
                if (cur->etag == tag && cur->ps == rr_ps)
                    cur->etag = 1UL << 63;
                cur = cur->next;
            }
            curadr += size;
            num--;
        }
    }
}

/*
 * purge VHPT and machine TLB
 */
static void vhpt_purge(VCPU *v, u64 va, u64 ps)
{
    //thash_cb_t *hcb = &v->arch.vhpt;
    thash_data_t *cur;
    u64 start, size, tag, num;
    ia64_rr rr;

    start = va & ~((1UL << ps) - 1);
    rr.rrval = ia64_get_rr(va);
    size = PSIZE(rr.ps);
    num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
    while (num) {
        cur = (thash_data_t *)ia64_thash(start);
        tag = ia64_ttag(start);
        while (cur) {
            if (cur->etag == tag)
                cur->etag = 1UL << 63;
            cur = cur->next;
        }
        start += size;
        num--;
    }
    machine_tlb_purge(va, ps);
}

/*
 * Recycle all collision chains in a VTLB or VHPT.
 */
void thash_recycle_cch_all(thash_cb_t *hcb)
{
    int num;
    thash_data_t *head;

    head = hcb->hash;
    num = (hcb->hash_sz/sizeof(thash_data_t));
    do {
        head->len = 0;
        head->next = 0;
        head++;
        num--;
    } while(num);
    cch_mem_init(hcb);
}
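
/*
 * Allocate a collision-chain entry, recycling every chain in the table if
 * the free pool is exhausted.
 */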
static thash_data_t *__alloc_chain(thash_cb_t *hcb)
{
    thash_data_t *cch;

    cch = cch_alloc(hcb);
    if (cch == NULL) {
        thash_recycle_cch_all(hcb);
        cch = cch_alloc(hcb);
    }
    return cch;
}

/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *  1: When inserting a VHPT entry into thash, "va" must be an address
 *     covered by the inserted machine VHPT entry.
 *  2: The entry format is always the TLB format.
 *  3: The caller must make sure the new entry does not overlap
 *     with any existing entry.
 */
void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
{
    thash_data_t *hash_table, *cch;
    /* int flag; */
    ia64_rr vrr;
    /* u64 gppn, ppns, ppne; */
    u64 tag, len;
    thash_cb_t *hcb = &v->arch.vtlb;

    vcpu_get_rr(v, va, &vrr.rrval);
    vrr.ps = itir_ps(itir);
    VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
    hash_table = vtlb_thash(hcb->pta, va, vrr.rrval, &tag);
    cch = hash_table;
    while (cch) {
        if (INVALID_TLB(cch)) {
            len = cch->len;
            cch->page_flags = pte;
            cch->len = len;
            cch->itir = itir;
            cch->etag = tag;
            return;
        }
        cch = cch->next;
    }
    if (hash_table->len >= MAX_CCN_DEPTH) {
        thash_recycle_cch(hcb, hash_table);
        cch = cch_alloc(hcb);
    }
    else {
        cch = __alloc_chain(hcb);
    }
    cch->page_flags = pte;
    cch->itir = itir;
    cch->etag = tag;
    cch->next = hash_table->next;
    wmb();
    hash_table->next = cch;
    hash_table->len += 1;
    return;
}
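
/*
 * Return the index of the guest TR (data or instruction) overlapping the
 * range [va, va + 2^ps), or -1 if none does.
 */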
int vtr_find_overlap(VCPU *vcpu, u64 va, u64 ps, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 end, rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid &= RR_RID_MASK;
    end = va + PSIZE(ps);
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            trp = (thash_data_t *)vcpu->arch.dtrs;
            for (i = 0; i < NDTRS; i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end)) {
                    return i;
                }
            }
        }
    }
    else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            trp = (thash_data_t *)vcpu->arch.itrs;
            for (i = 0; i < NITRS; i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end)) {
                    return i;
                }
            }
        }
    }
    return -1;
}

/*
 * Purge entries in VTLB and VHPT
 */
void thash_purge_entries(VCPU *v, u64 va, u64 ps)
{
    if (vcpu_quick_region_check(v->arch.tc_regions, va))
        vtlb_purge(v, va, ps);
    vhpt_purge(v, va, ps);
}

void thash_purge_entries_remote(VCPU *v, u64 va, u64 ps)
{
    u64 old_va = va;
    va = REGION_OFFSET(va);
    if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
        vtlb_purge(v, va, ps);
    vhpt_purge(v, va, ps);
}
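
/*
 * Translate a guest pte into a machine pte: convert the guest physical
 * address to a machine address via lookup_domain_mpa, force the WB memory
 * attribute for ordinary RAM, and flag I/O pages by setting VTLB_PTE_IO in
 * *pte and returning -1.
 */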
u64 translate_phy_pte(VCPU *v, u64 *pte, u64 itir, u64 va)
{
    u64 ps, ps_mask, paddr, maddr;
//    ia64_rr rr;
    union pte_flags phy_pte;

    ps = itir_ps(itir);
    ps_mask = ~((1UL << ps) - 1);
    phy_pte.val = *pte;
    paddr = *pte;
    paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
    maddr = lookup_domain_mpa(v->domain, paddr, NULL);
    if (maddr & GPFN_IO_MASK) {
        *pte |= VTLB_PTE_IO;
        return -1;
    }
    /* Ensure WB attribute if pte is related to a normal mem page,
     * which is required by vga acceleration since qemu maps shared
     * vram buffer with WB.
     */
    if (phy_pte.ma != VA_MATTR_NATPAGE)
        phy_pte.ma = VA_MATTR_WB;

//    rr.rrval = ia64_get_rr(va);
//    ps = rr.ps;
    maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
    phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
    return phy_pte.val;
}

/*
 * Purge overlapping TCs and then insert the new entry, to emulate itc ops.
 * Note: only TC entries can be purged and inserted this way.
 * Returns 1 if the mapping is MMIO.
 */
int thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
{
    u64 ps;//, va;
    u64 phy_pte;
    ia64_rr mrr;
    int ret = 0;

    ps = itir_ps(itir);
    mrr.rrval = ia64_get_rr(ifa);
    if (VMX_DOMAIN(v)) {
        phy_pte = translate_phy_pte(v, &pte, itir, ifa);

        if (pte & VTLB_PTE_IO)
            ret = 1;
        vtlb_purge(v, ifa, ps);
        vhpt_purge(v, ifa, ps);
        if (ps == mrr.ps) {
            if (!(pte & VTLB_PTE_IO)) {
                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
            }
            else {
                vtlb_insert(v, pte, itir, ifa);
                vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
            }
        }
        else if (ps > mrr.ps) {
            vtlb_insert(v, pte, itir, ifa);
            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
            if (!(pte & VTLB_PTE_IO)) {
                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
            }
        }
        else {
            u64 psr;

            vtlb_insert(v, pte, itir, ifa);
            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
            if (!(pte & VTLB_PTE_IO)) {
                phy_pte &= ~PAGE_FLAGS_RV_MASK;
                psr = ia64_clear_ic();
                ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
                ia64_set_psr(psr);
                ia64_srlz_i();
            }
        }
    }
    else {
        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
        if (ps != PAGE_SHIFT) {
            vtlb_insert(v, pte, itir, ifa);
            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
        }
        machine_tlb_purge(ifa, ps);
        vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    }
    return ret;
}

/*
 * Purge all TCs or VHPT entries including those in Hash table.
 *
 */

//TODO: add sections.
void thash_purge_all(VCPU *v)
{
    int num;
    thash_data_t *head;
    thash_cb_t *vtlb, *vhpt;
    vtlb = &v->arch.vtlb;
    vhpt = &v->arch.vhpt;

    for (num = 0; num < 8; num++)
        VMX(v, psbits[num]) = 0;

    head = vtlb->hash;
    num = (vtlb->hash_sz/sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->etag = 1UL << 63;
        head->itir = 0;
        head->next = 0;
        head++;
        num--;
    } while(num);
    cch_mem_init(vtlb);

    head = vhpt->hash;
    num = (vhpt->hash_sz/sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->etag = 1UL << 63;
        head->next = 0;
        head++;
        num--;
    } while(num);
    cch_mem_init(vhpt);
    local_flush_tlb_all();
}

/*
 * Look up the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 * INPUT:
 * in: TLB format for both VHPT & TLB.
 */
thash_data_t *vtlb_lookup(VCPU *v, u64 va, int is_data)
{
    thash_data_t *cch;
    u64 psbits, ps, tag;
    ia64_rr vrr;
    thash_cb_t *hcb = &v->arch.vtlb;

    cch = __vtr_lookup(v, va, is_data);
    if (cch)
        return cch;

    if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
        return NULL;
    psbits = VMX(v, psbits[(va >> 61)]);
    vcpu_get_rr(v, va, &vrr.rrval);
    while (psbits) {
        ps = __ffs(psbits);
        psbits &= ~(1UL << ps);
        vrr.ps = ps;
        cch = vtlb_thash(hcb->pta, va, vrr.rrval, &tag);
        do {
            if (cch->etag == tag && cch->ps == ps)
                return cch;
            cch = cch->next;
        } while(cch);
    }
    return NULL;
}

/*
 * Initialize internal control data before service.
 */
static void thash_init(thash_cb_t *hcb, u64 sz)
{
    int num;
    thash_data_t *head;

    hcb->pta.val = (unsigned long)hcb->hash;
    hcb->pta.vf = 1;
    hcb->pta.ve = 1;
    hcb->pta.size = sz;

    head = hcb->hash;
    num = (hcb->hash_sz/sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->itir = 0;
        head->etag = 1UL << 63;
        head->next = 0;
        head++;
        num--;
    } while(num);

    hcb->cch_free_idx = 0;
    hcb->cch_freelist = NULL;
}
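
/*
 * Allocate the hash table and the collision-chain buffer (sz bytes each,
 * physically contiguous) for a VTLB or VHPT and initialize them.
 */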
int thash_alloc(thash_cb_t *hcb, u64 sz_log2, char *what)
{
    struct page_info *page;
    void * vbase;
    u64 sz = 1UL << sz_log2;

    page = alloc_domheap_pages(NULL, (sz_log2 + 1 - PAGE_SHIFT), 0);
    if (page == NULL) {
        printk("No enough contiguous memory(%ldKB) for init_domain_%s\n",
               sz >> (10 - 1), what);
        return -ENOMEM;
    }
    vbase = page_to_virt(page);
    memset(vbase, 0, sz + sz); // hash + collisions chain
    if (sz_log2 >= 20 - 1)
        printk(XENLOG_DEBUG "Allocate domain %s at 0x%p(%ldMB)\n",
               what, vbase, sz >> (20 - 1));
    else
        printk(XENLOG_DEBUG "Allocate domain %s at 0x%p(%ldKB)\n",
               what, vbase, sz >> (10 - 1));

    hcb->hash = vbase;
    hcb->hash_sz = sz;
    hcb->cch_buf = (void *)((u64)vbase + hcb->hash_sz);
    hcb->cch_sz = sz;
    thash_init(hcb, sz_log2);
    return 0;
}
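
/* Free the hash/collision-chain pages allocated by thash_alloc. */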
void thash_free(thash_cb_t *hcb)
{
    struct page_info *page;

    if (hcb->hash) {
        page = virt_to_page(hcb->hash);
        free_domheap_pages(page, hcb->pta.size + 1 - PAGE_SHIFT);
        hcb->hash = 0;
    }
}