ia64/xen-unstable

view xen/arch/ia64/vmx/vtlb.c @ 18695:4a5acf020c0f

[IA64] Fix a memory attribute issue.

We should ensure qemu and the guest use the same attribute when accessing
the VGA RAM; otherwise, the host may hang.
This patch fixes a trivial typo in 18690:c19871b66cea.
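
For context, the attribute handling referred to above lives in
translate_phy_pte() below; the check (reproduced from that function)
forces a write-back attribute on normal RAM pages so that the guest's
mapping matches qemu's WB mapping of the shared VRAM buffer:

    if (mfn_valid(maddr >> PAGE_SHIFT) && phy_pte.ma != VA_MATTR_NATPAGE)
        phy_pte.ma = VA_MATTR_WB;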

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Thu Oct 30 11:51:55 2008 +0900 (2008-10-30)
parents c19871b66cea
children 10f0e1bb8e5e
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * XiaoYan Feng (Fleming Feng) (Fleming.feng@intel.com)
 */

#include <asm/vmx_vcpu.h>
#include <asm/vmx_phy_mode.h>
#include <asm/shadow.h>

static u64 translate_phy_pte(VCPU *v, u64 pte, u64 itir, u64 va);
static thash_data_t *__alloc_chain(thash_cb_t *);
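
/*
 * Collision-chain entry allocator: entries come from the free list
 * when possible, otherwise from the next unused slot in cch_buf.
 */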
static inline void cch_mem_init(thash_cb_t *hcb)
{
    hcb->cch_free_idx = 0;
    hcb->cch_freelist = NULL;
}

static thash_data_t *cch_alloc(thash_cb_t *hcb)
{
    thash_data_t *p;
    if ( (p = hcb->cch_freelist) != NULL ) {
        hcb->cch_freelist = p->next;
        return p;
    }
    if (hcb->cch_free_idx < hcb->cch_sz/sizeof(thash_data_t)) {
        p = &((thash_data_t *)hcb->cch_buf)[hcb->cch_free_idx++];
        p->page_flags = 0;
        p->itir = 0;
        p->next = NULL;
        return p;
    }
    return NULL;
}

/*
 * Check to see if the address rid:va is translated by the TLB
 */

static inline int __is_tr_translated(thash_data_t *trp, u64 rid, u64 va)
{
    return (trp->p) && (trp->rid == rid) && ((va - trp->vadr) < PSIZE(trp->ps));
}

/*
 * Only for GUEST TR format.
 */
static int
__is_tr_overlap(thash_data_t *trp, u64 rid, u64 sva, u64 eva)
{
    uint64_t sa1, ea1;

    if (!trp->p || trp->rid != rid) {
        return 0;
    }
    sa1 = trp->vadr;
    ea1 = sa1 + PSIZE(trp->ps) - 1;
    eva -= 1;
    if (sva > ea1 || sa1 > eva)
        return 0;
    else
        return 1;
}
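
/*
 * Search the guest TR arrays (dtrs for data, itrs for instruction)
 * for an entry that translates rid:va; return it, or NULL if none.
 */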
static thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid &= RR_RID_MASK;
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            trp = (thash_data_t *)vcpu->arch.dtrs;
            for (i = 0; i < NDTRS; i++, trp++) {
                if (__is_tr_translated(trp, rid, va)) {
                    return trp;
                }
            }
        }
    }
    else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            trp = (thash_data_t *)vcpu->arch.itrs;
            for (i = 0; i < NITRS; i++, trp++) {
                if (__is_tr_translated(trp, rid, va)) {
                    return trp;
                }
            }
        }
    }
    return NULL;
}
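
/*
 * Detach the collision chain hanging off 'hash' (hash->next through
 * 'tail') and put it back on the free list.
 */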
static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash,
                              thash_data_t *tail)
{
    thash_data_t *head = hash->next;

    hash->next = 0;
    tail->next = hcb->cch_freelist;
    hcb->cch_freelist = head;
}
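
/*
 * Insert a machine VHPT entry for ifa. If the hash head already holds
 * a valid entry, its contents are moved to a collision-chain slot
 * (allocating one if necessary) before the head is overwritten with
 * the new translation.
 */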
static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
{
    u64 tag, len;
    ia64_rr rr;
    thash_data_t *head, *cch;

    pte &= ((~PAGE_FLAGS_RV_MASK) | _PAGE_VIRT_D);
    rr.rrval = ia64_get_rr(ifa);
    head = (thash_data_t *)ia64_thash(ifa);
    tag = ia64_ttag(ifa);

    if (!INVALID_VHPT(head)) {
        /* Find a free (ie invalid) entry. */
        len = 0;
        cch = head;
        do {
            ++len;
            if (cch->next == NULL) {
                if (len >= MAX_CCN_DEPTH) {
                    thash_recycle_cch(hcb, head, cch);
                    cch = cch_alloc(hcb);
                } else {
                    cch = __alloc_chain(hcb);
                }
                cch->next = head->next;
                head->next = cch;
                break;
            }
            cch = cch->next;
        } while (!INVALID_VHPT(cch));

        /* As we insert in head, copy head. */
        local_irq_disable();
        cch->page_flags = head->page_flags;
        cch->itir = head->itir;
        cch->etag = head->etag;
        head->ti = 1;
        local_irq_enable();
    }
    /* here head is invalid. */
    wmb();
    head->page_flags = pte;
    head->itir = rr.ps << 2;
    *(volatile unsigned long *)&head->etag = tag;
    return;
}
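
/*
 * Translate the guest pte and install the result: into the machine
 * VHPT when the guest page is at least as large as the host page and
 * the vcpu is not in metaphysical (PHY_D) mode, otherwise directly
 * into the machine TLB with itc.
 */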
void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va, int type)
{
    u64 phy_pte, psr;
    ia64_rr mrr;

    phy_pte = translate_phy_pte(v, pte, itir, va);
    mrr.rrval = ia64_get_rr(va);

    if (itir_ps(itir) >= mrr.ps && VMX_MMU_MODE(v) != VMX_MMU_PHY_D) {
        vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
    } else {
        if (VMX_MMU_MODE(v) == VMX_MMU_PHY_D)
            itir = (itir & ~RR_PS_MASK) | (mrr.rrval & RR_PS_MASK);
        phy_pte &= ~PAGE_FLAGS_RV_MASK; /* Clear reserved fields. */
        psr = ia64_clear_ic();
        ia64_itc(type + 1, va, phy_pte, itir);
        ia64_set_psr(psr);
        ia64_srlz_i();
    }
}

/* On itr.d, old entries are not purged (optimization for Linux - see
   vmx_vcpu_itr_d). Fixup possible mismatch. */
int vhpt_access_rights_fixup(VCPU *v, u64 ifa, int is_data)
{
    thash_data_t *trp, *data;
    u64 ps, tag, mask;

    trp = __vtr_lookup(v, ifa, is_data);
    if (trp) {
        ps = _REGION_PAGE_SIZE(ia64_get_rr(ifa));
        if (trp->ps < ps)
            return 0;
        ifa = PAGEALIGN(ifa, ps);
        data = (thash_data_t *)ia64_thash(ifa);
        tag = ia64_ttag(ifa);
        do {
            if (data->etag == tag) {
                mask = trp->page_flags & PAGE_FLAGS_AR_PL_MASK;
                if (mask != (data->page_flags & PAGE_FLAGS_AR_PL_MASK)) {
                    data->page_flags &= ~PAGE_FLAGS_AR_PL_MASK;
                    data->page_flags |= mask;
                    machine_tlb_purge(ifa, ps);
                    return 1;
                }
                return 0;
            }
            data = data->next;
        } while(data);
    }
    return 0;
}

/*
 * vhpt lookup
 */

thash_data_t * vhpt_lookup(u64 va)
{
    thash_data_t *hash, *head;
    u64 tag, pte, itir;

    head = (thash_data_t *)ia64_thash(va);
    hash = head;
    tag = ia64_ttag(va);
    do {
        if (hash->etag == tag)
            break;
        hash = hash->next;
    } while(hash);
    if (hash && hash != head) {
        /* Put the entry on the front of the list (ie swap hash and head). */
        pte = hash->page_flags;
        hash->page_flags = head->page_flags;
        head->page_flags = pte;

        tag = hash->etag;
        hash->etag = head->etag;
        head->etag = tag;

        itir = hash->itir;
        hash->itir = head->itir;
        head->itir = itir;

        return head;
    }
    return hash;
}
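
/*
 * Read the guest VHPT entry at iha for the virtual hash walker.
 * Returns 0 and stores the entry in *pte on success, 1 if the
 * speculative load faulted (entry not mapped/not present).
 */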
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
    u64 ret, tmp;
    thash_data_t * data;

    /* Try to fill mTLB for the gVHPT entry. */
    data = vhpt_lookup(iha);
    if (data == NULL) {
        data = __vtr_lookup(current, iha, DSIDE_TLB);
        if (data != NULL)
            thash_vhpt_insert(current, data->page_flags, data->itir,
                              iha, DSIDE_TLB);
    }

    asm volatile ("rsm psr.ic|psr.i;;"
                  "srlz.d;;"
                  "ld8.s %1=[%2];;"          /* Read VHPT entry. */
                  "tnat.nz p6,p7=%1;;"       /* Success ? */
                  "(p6) mov %0=1;"           /* No -> ret = 1. */
                  "(p6) mov %1=r0;"
                  "(p7) extr.u %1=%1,0,53;;" /* Yes -> mask ig bits. */
                  "(p7) mov %0=r0;"          /* -> ret = 0. */
                  "(p7) st8 [%3]=%1;;"       /* -> save. */
                  "ssm psr.ic;;"
                  "srlz.d;;"
                  "ssm psr.i;;"
                  : "=r"(ret), "=r"(tmp)
                  : "r"(iha), "r"(pte) : "memory", "p6", "p7");
    return ret;
}
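
/*
 * Hash rid:va into a bucket of the software VTLB hash table and
 * compute the tag that identifies the entry on its collision chain.
 */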
static thash_data_t * vtlb_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
{
    u64 index, pfn, rid;

    pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
    rid = _REGION_ID(vrr);
    index = (pfn ^ rid) & ((1UL << (vpta.size - 5)) - 1);
    *tag = pfn ^ (rid << 39);
    return (thash_data_t *)((vpta.base << PTA_BASE_SHIFT) + (index << 5));
}

/*
 * purge software guest tlb
 */

static void vtlb_purge(VCPU *v, u64 va, u64 ps)
{
    thash_data_t *cur;
    u64 start, curadr, size, psbits, tag, rr_ps, num;
    ia64_rr vrr;
    thash_cb_t *hcb = &v->arch.vtlb;

    vcpu_get_rr(v, va, &vrr.rrval);
    psbits = VMX(v, psbits[(va >> 61)]);
    start = va & ~((1UL << ps) - 1);
    while (psbits) {
        curadr = start;
        rr_ps = __ffs(psbits);
        psbits &= ~(1UL << rr_ps);
        num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
        size = PSIZE(rr_ps);
        vrr.ps = rr_ps;
        while (num) {
            cur = vtlb_thash(hcb->pta, curadr, vrr.rrval, &tag);
            while (cur) {
                if (cur->etag == tag && cur->ps == rr_ps) {
                    cur->etag = 1UL << 63;
                    break;
                }
                cur = cur->next;
            }
            curadr += size;
            num--;
        }
    }
}

/*
 * purge VHPT and machine TLB
 */
static void vhpt_purge(VCPU *v, u64 va, u64 ps)
{
    //thash_cb_t *hcb = &v->arch.vhpt;
    thash_data_t *cur;
    u64 start, size, tag, num;
    ia64_rr rr;

    start = va & ~((1UL << ps) - 1);
    rr.rrval = ia64_get_rr(va);
    size = PSIZE(rr.ps);
    num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
    while (num) {
        cur = (thash_data_t *)ia64_thash(start);
        tag = ia64_ttag(start);
        while (cur) {
            if (cur->etag == tag) {
                cur->etag = 1UL << 63;
                break;
            }
            cur = cur->next;
        }
        start += size;
        num--;
    }
    machine_tlb_purge(va, ps);
}

/*
 * Recycle all collision chains in the VTLB or VHPT.
 */
void thash_recycle_cch_all(thash_cb_t *hcb)
{
    int num;
    thash_data_t *head;

    head = hcb->hash;
    num = (hcb->hash_sz/sizeof(thash_data_t));
    do {
        head->next = 0;
        head++;
        num--;
    } while(num);
    cch_mem_init(hcb);
}

static thash_data_t *__alloc_chain(thash_cb_t *hcb)
{
    thash_data_t *cch;

    cch = cch_alloc(hcb);
    if (cch == NULL) {
        thash_recycle_cch_all(hcb);
        cch = cch_alloc(hcb);
    }
    return cch;
}

/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 * 1: When inserting a VHPT entry into the thash, "va" must be an
 *    address covered by the inserted machine VHPT entry.
 * 2: The entry format is always TLB format.
 * 3: The caller must make sure the new entry will not overlap
 *    with any existing entry.
 */
static void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
{
    thash_data_t *hash_table, *cch, *tail;
    /* int flag; */
    ia64_rr vrr;
    /* u64 gppn, ppns, ppne; */
    u64 tag, len;
    thash_cb_t *hcb = &v->arch.vtlb;

    vcpu_quick_region_set(PSCBX(v, tc_regions), va);

    vcpu_get_rr(v, va, &vrr.rrval);
    vrr.ps = itir_ps(itir);
    VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
    hash_table = vtlb_thash(hcb->pta, va, vrr.rrval, &tag);
    len = 0;
    cch = hash_table;
    do {
        if (INVALID_TLB(cch)) {
            cch->page_flags = pte;
            cch->itir = itir;
            cch->etag = tag;
            return;
        }
        ++len;
        tail = cch;
        cch = cch->next;
    } while(cch);
    if (len >= MAX_CCN_DEPTH) {
        thash_recycle_cch(hcb, hash_table, tail);
        cch = cch_alloc(hcb);
    }
    else {
        cch = __alloc_chain(hcb);
    }
    cch->page_flags = pte;
    cch->itir = itir;
    cch->etag = tag;
    cch->next = hash_table->next;
    wmb();
    hash_table->next = cch;
    return;
}
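
/*
 * Return the index of the guest TR (data TRs if is_data, else
 * instruction TRs) that overlaps the range [va, va + 2^ps), or -1.
 */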
int vtr_find_overlap(VCPU *vcpu, u64 va, u64 ps, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 end, rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid &= RR_RID_MASK;
    end = va + PSIZE(ps);
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            trp = (thash_data_t *)vcpu->arch.dtrs;
            for (i = 0; i < NDTRS; i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end)) {
                    return i;
                }
            }
        }
    }
    else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            trp = (thash_data_t *)vcpu->arch.itrs;
            for (i = 0; i < NITRS; i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end)) {
                    return i;
                }
            }
        }
    }
    return -1;
}

/*
 * Purge entries in VTLB and VHPT
 */
void thash_purge_entries(VCPU *v, u64 va, u64 ps)
{
    if (vcpu_quick_region_check(v->arch.tc_regions, va))
        vtlb_purge(v, va, ps);
    vhpt_purge(v, va, ps);
}

void thash_purge_entries_remote(VCPU *v, u64 va, u64 ps)
{
    u64 old_va = va;
    va = REGION_OFFSET(va);
    if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
        vtlb_purge(v, va, ps);
    vhpt_purge(v, va, ps);
}
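
/*
 * Translate a guest pte into a machine pte: map the guest physical
 * address through lookup_domain_mpa(), force WB for normal RAM (see
 * below), and virtualize the dirty bit under shadow mode.
 * Returns -1 for I/O pages.
 */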
static u64 translate_phy_pte(VCPU *v, u64 pte, u64 itir, u64 va)
{
    u64 ps, ps_mask, paddr, maddr;
    union pte_flags phy_pte;
    struct domain *d = v->domain;

    ps = itir_ps(itir);
    ps_mask = ~((1UL << ps) - 1);
    phy_pte.val = pte;
    paddr = ((pte & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
    maddr = lookup_domain_mpa(d, paddr, NULL);
    if (maddr & _PAGE_IO)
        return -1;

    /* Ensure the WB attribute if the pte refers to a normal memory page;
     * VGA acceleration requires this, since qemu maps the shared
     * vram buffer with WB.
     */
    if (mfn_valid(maddr >> PAGE_SHIFT) && phy_pte.ma != VA_MATTR_NATPAGE)
        phy_pte.ma = VA_MATTR_WB;

    maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
    phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;

    /* If shadow mode is enabled, virtualize dirty bit. */
    if (shadow_mode_enabled(d) && phy_pte.d) {
        u64 gpfn = paddr >> PAGE_SHIFT;
        phy_pte.val |= _PAGE_VIRT_D;

        /* If the page is not already dirty, don't set the dirty bit! */
        if (gpfn < d->arch.shadow_bitmap_size * 8
            && !test_bit(gpfn, d->arch.shadow_bitmap))
            phy_pte.d = 0;
    }

    return phy_pte.val;
}

/*
 * Purge overlapping TCs and then insert the new entry, to emulate itc ops.
 * Note: only TC entries can be purged and inserted.
 * A return value of 1 indicates the mapping is MMIO.
 */
int thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
{
    u64 ps, phy_pte, psr;
    ia64_rr mrr;

    ps = itir_ps(itir);
    mrr.rrval = ia64_get_rr(ifa);

    phy_pte = translate_phy_pte(v, pte, itir, ifa);

    vtlb_purge(v, ifa, ps);
    vhpt_purge(v, ifa, ps);

    if (phy_pte == -1) {
        vtlb_insert(v, pte, itir, ifa);
        return 1;
    }

    if (ps != mrr.ps)
        vtlb_insert(v, pte, itir, ifa);

    if (ps >= mrr.ps) {
        vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    } else { /* Subpaging */
        phy_pte &= ~PAGE_FLAGS_RV_MASK;
        psr = ia64_clear_ic();
        ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
        ia64_set_psr(psr);
        ia64_srlz_i();
    }
    return 0;
}

/*
 * Purge all TC and VHPT entries, including those in the hash table.
 */

//TODO: add sections.
void thash_purge_all(VCPU *v)
{
    int num;
    thash_data_t *head;
    thash_cb_t *vtlb, *vhpt;
    vtlb = &v->arch.vtlb;
    vhpt = &v->arch.vhpt;

    for (num = 0; num < 8; num++)
        VMX(v, psbits[num]) = 0;

    head = vtlb->hash;
    num = (vtlb->hash_sz/sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->etag = 1UL << 63;
        head->itir = 0;
        head->next = 0;
        head++;
        num--;
    } while(num);
    cch_mem_init(vtlb);

    head = vhpt->hash;
    num = (vhpt->hash_sz/sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->etag = 1UL << 63;
        head->next = 0;
        head++;
        num--;
    } while(num);
    cch_mem_init(vhpt);
    local_flush_tlb_all();
}

static void __thash_purge_all(void *arg)
{
    struct vcpu *v = arg;

    BUG_ON(vcpu_runnable(v) || v->is_running);
    thash_purge_all(v);
}
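
/*
 * Flush the whole VTLB and VHPT of v. If v is not the current vcpu,
 * it is paused first and the flush is run on v's own processor when
 * necessary.
 */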
void vmx_vcpu_flush_vtlb_all(VCPU *v)
{
    if (v == current) {
        thash_purge_all(v);
        return;
    }

    /* SMP safe */
    vcpu_pause(v);
    if (v->processor == smp_processor_id())
        __thash_purge_all(v);
    else
        smp_call_function_single(v->processor, __thash_purge_all, v, 1, 1);
    vcpu_unpause(v);
}

/*
 * Look up the hash table and its collision chain to find an entry
 * covering the address rid:va.
 *
 * INPUT:
 * in: TLB format for both VHPT & TLB.
 */
thash_data_t *vtlb_lookup(VCPU *v, u64 va, int is_data)
{
    thash_data_t *cch;
    u64 psbits, ps, tag;
    ia64_rr vrr;
    thash_cb_t *hcb = &v->arch.vtlb;

    cch = __vtr_lookup(v, va, is_data);
    if (cch)
        return cch;

    if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
        return NULL;
    psbits = VMX(v, psbits[(va >> 61)]);
    vcpu_get_rr(v, va, &vrr.rrval);
    while (psbits) {
        ps = __ffs(psbits);
        psbits &= ~(1UL << ps);
        vrr.ps = ps;
        cch = vtlb_thash(hcb->pta, va, vrr.rrval, &tag);
        do {
            if (cch->etag == tag && cch->ps == ps)
                return cch;
            cch = cch->next;
        } while(cch);
    }
    return NULL;
}

/*
 * Initialize internal control data before service.
 */
static void thash_init(thash_cb_t *hcb, u64 sz)
{
    int num;
    thash_data_t *head;

    hcb->pta.val = (unsigned long)hcb->hash;
    hcb->pta.vf = 1;
    hcb->pta.ve = 1;
    hcb->pta.size = sz;

    head = hcb->hash;
    num = (hcb->hash_sz/sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->itir = 0;
        head->etag = 1UL << 63;
        head->next = 0;
        head++;
        num--;
    } while(num);

    hcb->cch_free_idx = 0;
    hcb->cch_freelist = NULL;
}
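
/*
 * Allocate the hash table plus its collision-chain buffer (2^sz_log2
 * bytes each) from the domain heap and initialize the control block.
 */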
int thash_alloc(thash_cb_t *hcb, u64 sz_log2, char *what)
{
    struct page_info *page;
    void * vbase;
    u64 sz = 1UL << sz_log2;

    page = alloc_domheap_pages(NULL, (sz_log2 + 1 - PAGE_SHIFT), 0);
    if (page == NULL) {
        printk("Not enough contiguous memory(%ldKB) for init_domain_%s\n",
               sz >> (10 - 1), what);
        return -ENOMEM;
    }
    vbase = page_to_virt(page);
    memset(vbase, 0, sz + sz); // hash + collisions chain
    if (sz_log2 >= 20 - 1)
        printk(XENLOG_DEBUG "Allocate domain %s at 0x%p(%ldMB)\n",
               what, vbase, sz >> (20 - 1));
    else
        printk(XENLOG_DEBUG "Allocate domain %s at 0x%p(%ldKB)\n",
               what, vbase, sz >> (10 - 1));

    hcb->hash = vbase;
    hcb->hash_sz = sz;
    hcb->cch_buf = (void *)((u64)vbase + hcb->hash_sz);
    hcb->cch_sz = sz;
    thash_init(hcb, sz_log2);
    return 0;
}

void thash_free(thash_cb_t *hcb)
{
    struct page_info *page;

    if (hcb->hash) {
        page = virt_to_page(hcb->hash);
        free_domheap_pages(page, hcb->pta.size + 1 - PAGE_SHIFT);
        hcb->hash = 0;
    }
}