ia64/xen-unstable

view xen/arch/ia64/xen/dom_fw.c @ 11273:3e0685ecfe64

[IA64] replace lost EFI_RESERVED_TYPE dom0 passthrough

Some systems (HP Superdome in particular) place ACPI tables in
reserved memory regions, so we have to pass it through to dom0.

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author awilliam@xenbuild.aw
date Fri Aug 25 16:21:39 2006 -0600 (2006-08-25)
parents d188c51ea883
children 6bf652c677c8
line source
1 /*
2 * Xen domain firmware emulation support
3 * Copyright (C) 2004 Hewlett-Packard Co.
4 * Dan Magenheimer (dan.magenheimer@hp.com)
5 *
6 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
7 * VA Linux Systems Japan K.K.
8 * dom0 vp model support
9 */
11 #include <xen/config.h>
12 #include <asm/system.h>
13 #include <asm/pgalloc.h>
15 #include <linux/efi.h>
16 #include <linux/sort.h>
17 #include <asm/io.h>
18 #include <asm/pal.h>
19 #include <asm/sal.h>
20 #include <asm/meminit.h>
21 #include <asm/fpswa.h>
22 #include <xen/version.h>
23 #include <xen/acpi.h>
24 #include <xen/errno.h>
26 #include <asm/dom_fw.h>
27 #include <asm/bundle.h>
29 #define ONE_MB (1UL << 20)
31 extern unsigned long running_on_sim;
33 /* Base of FW tables. */
34 static const unsigned long dom_fw_base_mpa = FW_HYPERCALL_END_PADDR;
35 static unsigned long dom_fw_end_mpa;
37 /* Note: two domains cannot be created simultaneously! */
38 static unsigned long imva_fw_base = -1;
40 #define FW_VENDOR "X\0e\0n\0/\0i\0a\0\066\0\064\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
/*
 * Append one EFI memory descriptor to tables->efi_memmap.
 * Relies on 'md', 'tables' and the running index 'i' being in scope at
 * the expansion site.  'start' and 'end' are byte addresses; the length
 * is converted to EFI pages, so both should be EFI-page aligned
 * (end must be >= start or num_pages wraps).
 */
#define MAKE_MD(typ, attr, start, end) 					\
	do {								\
		md = tables->efi_memmap + i++;				\
		md->type = typ;						\
		md->pad = 0;						\
		md->phys_addr = start;					\
		md->virt_addr = 0;					\
		md->num_pages = (end - start) >> EFI_PAGE_SHIFT;	\
		md->attribute = attr;					\
	} while (0)
/*
 * Wire the EFI runtime service 'tgt' to hypercall 'call':
 *  - patch a hypercall bundle at the fixed guest physical address
 *    FW_HYPERCALL_<call>_PADDR, and
 *  - point the runtime-services entry at a two-word ia64 function
 *    descriptor {entry, gp} built in the func_ptrs array ('pfn'
 *    cursor); gp is unused by the stub, hence 0.
 * Relies on 'd', 'tables' and 'pfn' being in scope at the expansion
 * site.
 */
#define EFI_HYPERCALL_PATCH(tgt, call) 					\
	do {								\
		dom_efi_hypercall_patch(d, FW_HYPERCALL_##call##_PADDR,	\
		                        FW_HYPERCALL_##call);		\
		tables->efi_runtime.tgt = dom_pa((unsigned long) pfn);	\
		*pfn++ = FW_HYPERCALL_##call##_PADDR;			\
		*pfn++ = 0;						\
	} while (0)
62 // return domain (meta)physical address for a given imva
63 // this function is a call-back from dom_fw_init
64 static unsigned long
65 dom_pa(unsigned long imva)
66 {
67 if (imva_fw_base == -1) {
68 printf("dom_pa: uninitialized! (spinning...)\n");
69 while(1);
70 }
71 if (imva - imva_fw_base > PAGE_SIZE) {
72 printf("dom_pa: bad offset! imva=0x%lx, imva_fw_base=0x%lx (spinning...)\n",
73 imva, imva_fw_base);
74 while(1);
75 }
76 return dom_fw_base_mpa + (imva - imva_fw_base);
77 }
79 // allocate a page for fw
80 // build_physmap_table() which is called by new_thread()
81 // does for domU.
82 static inline void
83 assign_new_domain_page_if_dom0(struct domain *d, unsigned long mpaddr)
84 {
85 if (d == dom0)
86 assign_new_domain0_page(d, mpaddr);
87 }
89 /**************************************************************************
90 Hypercall bundle creation
91 **************************************************************************/
/*
 * Write one IA64 instruction bundle at 'imva' implementing:
 *     mov r2 = hypnum ; break brkimm ; br[.ret]
 * i.e. load the hypercall number into r2, trap into Xen via the
 * break immediate, then either return to the caller (ret != 0,
 * br.ret.sptk.many rp) or plain-branch back (ret == 0).  The bundle
 * is flushed from the cache with fc so the guest fetches fresh code.
 */
static void build_hypercall_bundle(UINT64 *imva, UINT64 brkimm, UINT64 hypnum, UINT64 ret)
{
	INST64_A5 slot0;
	INST64_I19 slot1;
	INST64_B4 slot2;
	IA64_BUNDLE bundle;

	// slot0: mov r2 = hypnum (low 20 bits)
	// (original comment said "slot1" but this clearly fills slot0)
	slot0.inst = 0;
	slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
	slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
	slot0.imm5c = hypnum >> 16; slot0.s = 0;
	// slot1: break brkimm
	slot1.inst = 0;
	slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
	slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
	// if ret slot2: br.ret.sptk.many rp
	// else slot2: br.cond.sptk.many rp
	slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
	slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
	if (ret) {
		slot2.btype = 4; slot2.x6 = 0x21;
	}
	else {
		slot2.btype = 0; slot2.x6 = 0x20;
	}

	/* Assemble the three slots into a 128-bit bundle; slot1
	   straddles the two 64-bit words (slot1a/slot1b split at
	   bit 18). */
	bundle.i64[0] = 0; bundle.i64[1] = 0;
	bundle.template = 0x11;
	bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
	bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;

	imva[0] = bundle.i64[0]; imva[1] = bundle.i64[1];
	/* Flush both words so the icache sees the patched code. */
	ia64_fc(imva);
	ia64_fc(imva + 1);
}
/*
 * Write the two-bundle PAL hypercall stub at 'imva'.  Unlike the
 * generic hypercall bundle this stub is copied from the hand-written
 * pal_call_stub template in xenasm.S, then patched in place: the
 * hypercall number goes into the first bundle's A5 (mov) slot and the
 * break immediate into the second bundle's M37 slot.
 */
static void build_pal_hypercall_bundles(UINT64 *imva, UINT64 brkimm, UINT64 hypnum)
{
	extern unsigned long pal_call_stub[];
	IA64_BUNDLE bundle;
	INST64_A5 slot_a5;
	INST64_M37 slot_m37;

	/* The source of the hypercall stub is the pal_call_stub function
	   defined in xenasm.S. */

	/* Copy the first bundle and patch the hypercall number. */
	bundle.i64[0] = pal_call_stub[0];
	bundle.i64[1] = pal_call_stub[1];
	slot_a5.inst = bundle.slot0;
	slot_a5.imm7b = hypnum;
	slot_a5.imm9d = hypnum >> 7;
	slot_a5.imm5c = hypnum >> 16;
	bundle.slot0 = slot_a5.inst;
	imva[0] = bundle.i64[0];
	imva[1] = bundle.i64[1];
	/* Flush so the guest fetches the patched instructions. */
	ia64_fc(imva);
	ia64_fc(imva + 1);

	/* Copy the second bundle and patch the hypercall vector. */
	bundle.i64[0] = pal_call_stub[2];
	bundle.i64[1] = pal_call_stub[3];
	slot_m37.inst = bundle.slot0;
	slot_m37.imm20a = brkimm;
	slot_m37.i = brkimm >> 20;
	bundle.slot0 = slot_m37.inst;
	imva[2] = bundle.i64[0];
	imva[3] = bundle.i64[1];
	ia64_fc(imva + 2);
	ia64_fc(imva + 3);
}
// builds a hypercall bundle at domain physical address
/*
 * Set up the FPSWA (floating-point software assist) entry for the
 * domain: the page at FW_HYPERCALL_FPSWA_ENTRY_PADDR holds a two-word
 * ia64 function descriptor {patch_paddr, gp=0}, and the page at
 * FW_HYPERCALL_FPSWA_PATCH_PADDR holds the actual hypercall bundle
 * (break into Xen, then br.ret).
 */
static void dom_fpswa_hypercall_patch(struct domain *d)
{
	unsigned long *entry_imva, *patch_imva;
	unsigned long entry_paddr = FW_HYPERCALL_FPSWA_ENTRY_PADDR;
	unsigned long patch_paddr = FW_HYPERCALL_FPSWA_PATCH_PADDR;

	/* Pages are only allocated here for dom0; domU gets them via
	   build_physmap_table(). */
	assign_new_domain_page_if_dom0(d, entry_paddr);
	assign_new_domain_page_if_dom0(d, patch_paddr);
	entry_imva = domain_mpa_to_imva(d, entry_paddr);
	patch_imva = domain_mpa_to_imva(d, patch_paddr);

	/* Function descriptor: entry point, then gp (unused). */
	*entry_imva++ = patch_paddr;
	*entry_imva = 0;
	build_hypercall_bundle(patch_imva, d->arch.breakimm,
	                       FW_HYPERCALL_FPSWA, 1);
}
184 // builds a hypercall bundle at domain physical address
185 static void dom_efi_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall)
186 {
187 unsigned long *imva;
189 assign_new_domain_page_if_dom0(d, paddr);
190 imva = domain_mpa_to_imva(d, paddr);
191 build_hypercall_bundle(imva, d->arch.breakimm, hypercall, 1);
192 }
// builds a hypercall bundle at domain physical address
/*
 * Like dom_efi_hypercall_patch(), but the caller chooses whether the
 * stub returns to its caller ('ret' != 0 -> br.ret) or plain-branches
 * back ('ret' == 0), e.g. for the SAL return point.
 */
static void dom_fw_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall,unsigned long ret)
{
	unsigned long *imva;

	assign_new_domain_page_if_dom0(d, paddr);
	imva = domain_mpa_to_imva(d, paddr);
	build_hypercall_bundle(imva, d->arch.breakimm, hypercall, ret);
}
/*
 * Install the two-bundle PAL call stub (copied from pal_call_stub in
 * xenasm.S) at guest physical address 'paddr', allocating the backing
 * page first when the domain is dom0.
 */
static void dom_fw_pal_hypercall_patch(struct domain *d, unsigned long paddr)
{
	unsigned long *imva;

	assign_new_domain_page_if_dom0(d, paddr);
	imva = domain_mpa_to_imva(d, paddr);
	build_pal_hypercall_bundles(imva, d->arch.breakimm,
	                            FW_HYPERCALL_PAL_CALL);
}
214 /* the following heavily leveraged from linux/arch/ia64/hp/sim/fw-emu.c */
216 static inline void
217 print_md(efi_memory_desc_t *md)
218 {
219 u64 size;
221 printk("dom mem: type=%2u, attr=0x%016lx, range=[0x%016lx-0x%016lx) ",
222 md->type, md->attribute, md->phys_addr,
223 md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT));
225 size = md->num_pages << EFI_PAGE_SHIFT;
226 if (size > ONE_MB)
227 printf ("(%luMB)\n", size >> 20);
228 else
229 printf ("(%luKB)\n", size >> 10);
230 }
/* Running count of LSAPIC entries assigned to dom0 VCPUs; reset by
   touch_acpi_table() before each MADT walk. */
static u32 lsapic_nbr;

/* Modify lsapic table. Provides LPs. */
/*
 * MADT-walk callback (see acpi_table_parse_madt in touch_acpi_table):
 * renumbers each enabled LSAPIC entry to the next dom0 VCPU id, and
 * disables (and zeroes) entries beyond the number of dom0 VCPUs.
 * Returns 0 on success, -EINVAL for a NULL entry; 'end' is unused.
 */
static int
acpi_update_lsapic (acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_lsapic *lsapic;
	int enable;

	lsapic = (struct acpi_table_lsapic *) header;
	if (!lsapic)
		return -EINVAL;

	/* Keep this entry only if dom0 has a VCPU to back it. */
	if (lsapic_nbr < MAX_VIRT_CPUS && dom0->vcpu[lsapic_nbr] != NULL)
		enable = 1;
	else
		enable = 0;
	if (lsapic->flags.enabled && enable) {
		printk("enable lsapic entry: 0x%lx\n", (u64)lsapic);
		lsapic->id = lsapic_nbr;
		lsapic->eid = 0;
		lsapic_nbr++;
	} else if (lsapic->flags.enabled) {
		printk("DISABLE lsapic entry: 0x%lx\n", (u64)lsapic);
		lsapic->flags.enabled = 0;
		lsapic->id = 0;
		lsapic->eid = 0;
	}
	return 0;
}
/*
 * Standard ACPI table checksum: return the byte that makes the first
 * 'len' bytes of 'tbl' sum to zero modulo 256.
 */
static u8
generate_acpi_checksum(void *tbl, unsigned long len)
{
	const u8 *p = tbl;
	u8 sum = 0;
	unsigned long off;

	for (off = 0; off < len; off++)
		sum += p[off];

	return 0 - sum;
}
/*
 * acpi_table_parse() callback: recompute the MADT checksum after
 * acpi_update_lsapic() has rewritten its LSAPIC entries.
 * 'phys_addr'/'size' locate the MADT in machine memory (accessed via
 * the Xen direct map with __va).  Returns 0, or -EINVAL on a missing
 * table.
 */
static int
acpi_update_madt_checksum (unsigned long phys_addr, unsigned long size)
{
	struct acpi_table_madt* acpi_madt;

	if (!phys_addr || !size)
		return -EINVAL;

	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
	/* Zero the field first so it does not contribute to the sum. */
	acpi_madt->header.checksum = 0;
	acpi_madt->header.checksum = generate_acpi_checksum(acpi_madt, size);

	return 0;
}
289 /* base is physical address of acpi table */
290 static void touch_acpi_table(void)
291 {
292 lsapic_nbr = 0;
293 if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_update_lsapic, 0) < 0)
294 printk("Error parsing MADT - no LAPIC entires\n");
295 acpi_table_parse(ACPI_APIC, acpi_update_madt_checksum);
297 return;
298 }
/*
 * In-memory layout of the minimal ACPI table set fabricated for domU
 * by dom_fw_fake_acpi().  The tables reference each other by guest
 * physical address (via dom_pa), so this struct is built inside the
 * single firmware-tables page.
 */
struct fake_acpi_tables {
	struct acpi20_table_rsdp rsdp;		/* root pointer (ACPI 2.0) */
	struct xsdt_descriptor_rev2 xsdt;	/* points at fadt... */
	u64 madt_ptr;				/* ...and this extra XSDT slot -> madt */
	struct fadt_descriptor_rev2 fadt;
	struct facs_descriptor_rev2 facs;
	struct acpi_table_header dsdt;
	u8 aml[8 + 11 * MAX_VIRT_CPUS];		/* hand-built AML namespace */
	struct acpi_table_madt madt;
	struct acpi_table_lsapic lsapic[MAX_VIRT_CPUS];
	/* Fake ACPI "hardware registers", emulated in plain memory to
	   satisfy ACPI CA sanity checks (see FADT setup). */
	u8 pm1a_evt_blk[4];
	u8 pm1a_cnt_blk[1];
	u8 pm_tmr_blk[4];
};
/* Create enough of an ACPI structure to make the guest OS ACPI happy. */
/*
 * Build the fake RSDP/XSDT/FADT/FACS/DSDT/MADT chain for domain 'd'
 * inside 'tables' (part of the firmware page, so dom_pa() yields the
 * guest physical address of each table).  The MADT advertises one
 * LSAPIC per possible VCPU, enabled only where d->vcpu[i] exists.
 */
static void
dom_fw_fake_acpi(struct domain *d, struct fake_acpi_tables *tables)
{
	struct acpi20_table_rsdp *rsdp = &tables->rsdp;
	struct xsdt_descriptor_rev2 *xsdt = &tables->xsdt;
	struct fadt_descriptor_rev2 *fadt = &tables->fadt;
	struct facs_descriptor_rev2 *facs = &tables->facs;
	struct acpi_table_header *dsdt = &tables->dsdt;
	struct acpi_table_madt *madt = &tables->madt;
	struct acpi_table_lsapic *lsapic = tables->lsapic;
	int i;
	int aml_len;
	int nbr_cpus;

	memset(tables, 0, sizeof(struct fake_acpi_tables));

	/* setup XSDT (64bit version of RSDT) */
	strncpy(xsdt->signature, XSDT_SIG, 4);
	/* XSDT points to both the FADT and the MADT, so add one entry
	   (the extra u64 is tables->madt_ptr, which sits directly after
	   the xsdt struct in memory). */
	xsdt->length = sizeof(struct xsdt_descriptor_rev2) + sizeof(u64);
	xsdt->revision = 1;
	strcpy(xsdt->oem_id, "XEN");
	strcpy(xsdt->oem_table_id, "Xen/ia64");
	strcpy(xsdt->asl_compiler_id, "XEN");
	xsdt->asl_compiler_revision = (xen_major_version() << 16) |
		xen_minor_version();

	xsdt->table_offset_entry[0] = dom_pa((unsigned long) fadt);
	tables->madt_ptr = dom_pa((unsigned long) madt);

	xsdt->checksum = generate_acpi_checksum(xsdt, xsdt->length);

	/* setup FADT */
	strncpy(fadt->signature, FADT_SIG, 4);
	fadt->length = sizeof(struct fadt_descriptor_rev2);
	fadt->revision = FADT2_REVISION_ID;
	strcpy(fadt->oem_id, "XEN");
	strcpy(fadt->oem_table_id, "Xen/ia64");
	strcpy(fadt->asl_compiler_id, "XEN");
	fadt->asl_compiler_revision = (xen_major_version() << 16) |
		xen_minor_version();

	strncpy(facs->signature, FACS_SIG, 4);
	facs->version = 1;
	facs->length = sizeof(struct facs_descriptor_rev2);

	fadt->xfirmware_ctrl = dom_pa((unsigned long) facs);
	fadt->Xdsdt = dom_pa((unsigned long) dsdt);

	/*
	 * All of the below FADT entries are filled in to prevent warnings
	 * from sanity checks in the ACPI CA. Emulate required ACPI hardware
	 * registers in system memory.
	 */
	fadt->pm1_evt_len = 4;
	fadt->xpm1a_evt_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
	fadt->xpm1a_evt_blk.register_bit_width = 8;
	fadt->xpm1a_evt_blk.address = dom_pa((unsigned long) &tables->pm1a_evt_blk);
	fadt->pm1_cnt_len = 1;
	fadt->xpm1a_cnt_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
	fadt->xpm1a_cnt_blk.register_bit_width = 8;
	fadt->xpm1a_cnt_blk.address = dom_pa((unsigned long) &tables->pm1a_cnt_blk);
	fadt->pm_tm_len = 4;
	fadt->xpm_tmr_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
	fadt->xpm_tmr_blk.register_bit_width = 8;
	fadt->xpm_tmr_blk.address = dom_pa((unsigned long) &tables->pm_tmr_blk);

	fadt->checksum = generate_acpi_checksum(fadt, fadt->length);

	/* setup RSDP */
	strncpy(rsdp->signature, RSDP_SIG, 8);
	strcpy(rsdp->oem_id, "XEN");
	rsdp->revision = 2; /* ACPI 2.0 includes XSDT */
	rsdp->length = sizeof(struct acpi20_table_rsdp);
	rsdp->xsdt_address = dom_pa((unsigned long) xsdt);

	/* ACPI 2.0 RSDP has two checksums: the 1.0-compatible one over
	   the first 20 bytes, and the extended one over the whole
	   structure. */
	rsdp->checksum = generate_acpi_checksum(rsdp,
	                                        ACPI_RSDP_CHECKSUM_LENGTH);
	rsdp->ext_checksum = generate_acpi_checksum(rsdp, rsdp->length);

	/* setup DSDT with trivial namespace. */
	strncpy(dsdt->signature, DSDT_SIG, 4);
	dsdt->revision = 1;
	strcpy(dsdt->oem_id, "XEN");
	strcpy(dsdt->oem_table_id, "Xen/ia64");
	strcpy(dsdt->asl_compiler_id, "XEN");
	dsdt->asl_compiler_revision = (xen_major_version() << 16) |
		xen_minor_version();

	/* Trivial namespace, avoids ACPI CA complaints */
	tables->aml[0] = 0x10; /* Scope */
	tables->aml[1] = 0x40; /* length/offset to next object (patched) */
	tables->aml[2] = 0x00;
	strncpy((char *)&tables->aml[3], "_SB_", 4);

	/* The processor object isn't absolutely necessary, revisit for SMP.
	   NOTE(review): only 3 processor objects are emitted regardless of
	   MAX_VIRT_CPUS, and the snprintf below NUL-terminates at p[6]
	   before p[5] is overwritten with 'U' (i < 16 always holds for
	   i < 3) — so the name is effectively "CPUx\0". Presumably
	   intentional padding of the 4-char name; confirm before reuse. */
	aml_len = 7;
	for (i = 0; i < 3; i++) {
		unsigned char *p = tables->aml + aml_len;
		p[0] = 0x5b;	/* processor object */
		p[1] = 0x83;
		p[2] = 0x0b;	/* next */
		p[3] = 'C';
		p[4] = 'P';
		snprintf ((char *)p + 5, 3, "%02x", i);
		if (i < 16)
			p[5] = 'U';
		p[7] = i;	/* acpi_id */
		p[8] = 0;	/* pblk_addr */
		p[9] = 0;
		p[10] = 0;
		p[11] = 0;
		p[12] = 0;	/* pblk_len */
		aml_len += 13;
	}
	/* Patch the AML PkgLength (two-byte encoding) of the Scope. */
	tables->aml[1] = 0x40 + ((aml_len - 1) & 0x0f);
	tables->aml[2] = (aml_len - 1) >> 4;
	dsdt->length = sizeof(struct acpi_table_header) + aml_len;
	dsdt->checksum = generate_acpi_checksum(dsdt, dsdt->length);

	/* setup MADT */
	strncpy(madt->header.signature, APIC_SIG, 4);
	madt->header.revision = 2;
	strcpy(madt->header.oem_id, "XEN");
	strcpy(madt->header.oem_table_id, "Xen/ia64");
	strcpy(madt->header.asl_compiler_id, "XEN");
	madt->header.asl_compiler_revision = (xen_major_version() << 16) |
		xen_minor_version();

	/* An LSAPIC entry describes a CPU.  NOTE(review): all
	   MAX_VIRT_CPUS entries are initialized, but header.length only
	   covers the first nbr_cpus of them — this relies on enabled
	   VCPUs occupying the low indices; confirm against callers. */
	nbr_cpus = 0;
	for (i = 0; i < MAX_VIRT_CPUS; i++) {
		lsapic[i].header.type = ACPI_MADT_LSAPIC;
		lsapic[i].header.length = sizeof(struct acpi_table_lsapic);
		lsapic[i].acpi_id = i;
		lsapic[i].id = i;
		lsapic[i].eid = 0;
		if (d->vcpu[i] != NULL) {
			lsapic[i].flags.enabled = 1;
			nbr_cpus++;
		}
	}
	madt->header.length = sizeof(struct acpi_table_madt) +
	                      nbr_cpus * sizeof(struct acpi_table_lsapic);
	madt->header.checksum = generate_acpi_checksum(madt,
	                                               madt->header.length);
	return;
}
/*
 * Cursor state threaded through the efi_memmap_walk_type() callbacks
 * below: 'md' is the next output slot in the domain's EFI memmap and
 * '*i' the shared running count of entries written.
 */
struct dom0_passthrough_arg {
	struct domain* d;	/* target domain (dom0) */
	int flags;		/* ASSIGN_* flags for assign_domain_mach_page */
	efi_memory_desc_t *md;	/* next output descriptor */
	int* i;			/* running descriptor count */
};
/*
 * efi_memmap_walk_type() callback: map one machine EFI region into
 * dom0 (MMIO regions via assign_domain_mmio_page, everything else via
 * assign_domain_mach_page with arg->flags) and append a matching
 * descriptor to dom0's firmware memmap.  Always returns 0 so the walk
 * continues.
 */
static int
dom_fw_dom0_passthrough(efi_memory_desc_t *md, void *arg__)
{
	struct dom0_passthrough_arg* arg = (struct dom0_passthrough_arg*)arg__;
	unsigned long paddr;
	struct domain* d = arg->d;
	u64 start = md->phys_addr;
	u64 size = md->num_pages << EFI_PAGE_SHIFT;

	if (md->type == EFI_MEMORY_MAPPED_IO ||
	    md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {

		//XXX some machine has large mmio area whose size is about several TB.
		//    It requires impractical memory to map such a huge region
		//    to a domain.
		//    For now we don't map it, but later we must fix this.
		if (md->type == EFI_MEMORY_MAPPED_IO && (size > 0x100000000UL))
			return 0;

		paddr = assign_domain_mmio_page(d, start, size);
	} else
		paddr = assign_domain_mach_page(d, start, size, arg->flags);

	/* Only the region types walked by dom_fw_init() are expected;
	   anything else indicates a caller bug. */
	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA &&
	       md->type != EFI_ACPI_RECLAIM_MEMORY &&
	       md->type != EFI_ACPI_MEMORY_NVS &&
	       md->type != EFI_RESERVED_TYPE &&
	       md->type != EFI_MEMORY_MAPPED_IO &&
	       md->type != EFI_MEMORY_MAPPED_IO_PORT_SPACE);

	/* Emit the descriptor with the domain-side physical address. */
	arg->md->type = md->type;
	arg->md->pad = 0;
	arg->md->phys_addr = paddr;
	arg->md->virt_addr = 0;
	arg->md->num_pages = md->num_pages;
	arg->md->attribute = md->attribute;

	(*arg->i)++;
	arg->md++;
	return 0;
}
/*
 * Create dom0 MDT entries for conventional memory below 1MB. Without
 * this Linux will assume VGA is present because 0xA0000 will always
 * be either a hole in the MDT or an I/O region via the passthrough.
 *
 * efi_memmap_walk_type() callback over EFI_CONVENTIONAL_MEMORY.
 * Regions are clipped to [dom_fw_end_mpa, ONE_MB); anything entirely
 * outside that window is skipped.  Note: md->phys_addr of the walked
 * descriptor is adjusted in place when it overlaps the firmware area.
 */
static int
dom_fw_dom0_lowmem(efi_memory_desc_t *md, void *arg__)
{
	struct dom0_passthrough_arg* arg = (struct dom0_passthrough_arg*)arg__;
	u64 end = min(ONE_MB, md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT));

	BUG_ON(md->type != EFI_CONVENTIONAL_MEMORY);

	/* Avoid firmware and hypercall area.
	   We know they are 0-based. */
	if (end < dom_fw_end_mpa || md->phys_addr >= ONE_MB)
		return 0;
	if (md->phys_addr < dom_fw_end_mpa)
		md->phys_addr = dom_fw_end_mpa;

	arg->md->type = md->type;
	arg->md->pad = 0;
	arg->md->phys_addr = md->phys_addr;
	arg->md->virt_addr = 0;
	arg->md->num_pages = (end - md->phys_addr) >> EFI_PAGE_SHIFT;
	arg->md->attribute = md->attribute;

	(*arg->i)++;
	arg->md++;

	return 0;
}
548 static int
549 efi_mdt_cmp(const void *a, const void *b)
550 {
551 const efi_memory_desc_t *x = a, *y = b;
553 if (x->phys_addr > y->phys_addr)
554 return 1;
555 if (x->phys_addr < y->phys_addr)
556 return -1;
558 // num_pages == 0 is allowed.
559 if (x->num_pages > y->num_pages)
560 return 1;
561 if (x->num_pages < y->num_pages)
562 return -1;
564 return 0;
565 }
#define NFUNCPTRS 16		/* max EFI function descriptors (2 words each) */
#define NUM_EFI_SYS_TABLES 6
#define NUM_MEM_DESCS 64	//large enough

/*
 * Complete firmware image handed to a guest, built in one page (see
 * BUILD_BUG_ON in dom_fw_setup).  Tables reference each other by
 * guest physical address computed with dom_pa(), so field offsets
 * matter: in particular the SAL checksum in dom_fw_init() covers
 * everything from sal_systab up to (not including) fpswa_inf.
 */
struct fw_tables {
	efi_system_table_t efi_systab;
	efi_runtime_services_t efi_runtime;
	efi_config_table_t efi_tables[NUM_EFI_SYS_TABLES];

	struct ia64_sal_systab sal_systab;
	struct ia64_sal_desc_entry_point sal_ed;
	struct ia64_sal_desc_ap_wakeup sal_wakeup;
	/* End of SAL descriptors. Do not forget to update checksum bound. */

	fpswa_interface_t fpswa_inf;
	efi_memory_desc_t efi_memmap[NUM_MEM_DESCS];
	unsigned long func_ptrs[2*NFUNCPTRS];	/* {entry, gp} descriptor pairs */
	struct xen_sal_data sal_data;
	unsigned char fw_vendor[sizeof(FW_VENDOR)];

	struct fake_acpi_tables acpi_tables;	/* used for domU only */
};
/*
 * Populate the firmware tables page for domain 'd':
 *  - EFI system/runtime tables with all runtime services redirected
 *    to hypercall stubs,
 *  - EFI config tables (real machine tables passed through for dom0,
 *    fabricated ACPI tables for domU),
 *  - SAL system table, entry-point and AP-wakeup descriptors,
 *  - FPSWA interface,
 *  - the domain's EFI memory map (synthetic for domU/simulator, real
 *    passthrough plus gap-filling for dom0),
 * and finally fills in the guest's ia64_boot_param.
 * 'maxmem' bounds the conventional memory handed to the guest.
 * Must run after dom_fw_setup() has set imva_fw_base/dom_fw_end_mpa
 * (dom_pa() spins otherwise).
 */
static void
dom_fw_init(struct domain *d,
            struct ia64_boot_param *bp,
            struct fw_tables *tables,
            unsigned long maxmem)
{
	efi_memory_desc_t *md;
	unsigned long *pfn;
	unsigned char checksum;
	char *cp;
	int num_mds, i;

	memset(tables, 0, sizeof(struct fw_tables));

	/* Initialise for EFI_SET_VIRTUAL_ADDRESS_MAP emulation */
	d->arch.efi_runtime = &tables->efi_runtime;
	d->arch.fpswa_inf = &tables->fpswa_inf;
	d->arch.sal_data = &tables->sal_data;

	/* EFI systab. */
	tables->efi_systab.hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
	tables->efi_systab.hdr.revision = EFI_SYSTEM_TABLE_REVISION;
	tables->efi_systab.hdr.headersize = sizeof(tables->efi_systab.hdr);

	memcpy(tables->fw_vendor,FW_VENDOR,sizeof(FW_VENDOR));
	tables->efi_systab.fw_vendor =
		dom_pa((unsigned long)tables->fw_vendor);
	tables->efi_systab.fw_revision = 1;
	tables->efi_systab.runtime =
		(void *)dom_pa((unsigned long)&tables->efi_runtime);
	tables->efi_systab.nr_tables = NUM_EFI_SYS_TABLES;
	tables->efi_systab.tables = dom_pa((unsigned long)tables->efi_tables);

	/* EFI runtime. */
	tables->efi_runtime.hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
	tables->efi_runtime.hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
	tables->efi_runtime.hdr.headersize = sizeof(tables->efi_runtime.hdr);

	/* Redirect every EFI runtime service to a hypercall stub;
	   'pfn' walks the func_ptrs descriptor array (see
	   EFI_HYPERCALL_PATCH). */
	pfn = tables->func_ptrs;
	EFI_HYPERCALL_PATCH(get_time,EFI_GET_TIME);
	EFI_HYPERCALL_PATCH(set_time,EFI_SET_TIME);
	EFI_HYPERCALL_PATCH(get_wakeup_time,EFI_GET_WAKEUP_TIME);
	EFI_HYPERCALL_PATCH(set_wakeup_time,EFI_SET_WAKEUP_TIME);
	EFI_HYPERCALL_PATCH(set_virtual_address_map,
	                    EFI_SET_VIRTUAL_ADDRESS_MAP);
	EFI_HYPERCALL_PATCH(get_variable,EFI_GET_VARIABLE);
	EFI_HYPERCALL_PATCH(get_next_variable,EFI_GET_NEXT_VARIABLE);
	EFI_HYPERCALL_PATCH(set_variable,EFI_SET_VARIABLE);
	EFI_HYPERCALL_PATCH(get_next_high_mono_count,
	                    EFI_GET_NEXT_HIGH_MONO_COUNT);
	EFI_HYPERCALL_PATCH(reset_system,EFI_RESET_SYSTEM);

	/* System tables. */
	tables->efi_tables[0].guid = SAL_SYSTEM_TABLE_GUID;
	tables->efi_tables[0].table =
		dom_pa((unsigned long)&tables->sal_systab);
	for (i = 1; i < NUM_EFI_SYS_TABLES; i++) {
		tables->efi_tables[i].guid = NULL_GUID;
		tables->efi_tables[i].table = 0;
	}
	i = 1;
	if (d == dom0) {
		/* Pass the machine's own config tables through to dom0. */
		printf("Domain0 EFI passthrough:");
		if (efi.mps) {
			tables->efi_tables[i].guid = MPS_TABLE_GUID;
			tables->efi_tables[i].table = __pa(efi.mps);
			printf(" MPS=0x%lx",tables->efi_tables[i].table);
			i++;
		}

		/* Rewrite the MADT to match dom0's VCPUs first. */
		touch_acpi_table();

		if (efi.acpi20) {
			tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
			tables->efi_tables[i].table = __pa(efi.acpi20);
			printf(" ACPI 2.0=0x%lx",tables->efi_tables[i].table);
			i++;
		}
		if (efi.acpi) {
			tables->efi_tables[i].guid = ACPI_TABLE_GUID;
			tables->efi_tables[i].table = __pa(efi.acpi);
			printf(" ACPI=0x%lx",tables->efi_tables[i].table);
			i++;
		}
		if (efi.smbios) {
			tables->efi_tables[i].guid = SMBIOS_TABLE_GUID;
			tables->efi_tables[i].table = __pa(efi.smbios);
			printf(" SMBIOS=0x%lx",tables->efi_tables[i].table);
			i++;
		}
		if (efi.hcdp) {
			tables->efi_tables[i].guid = HCDP_TABLE_GUID;
			tables->efi_tables[i].table = __pa(efi.hcdp);
			printf(" HCDP=0x%lx",tables->efi_tables[i].table);
			i++;
		}
		printf("\n");
	} else {
		/* domU: fabricate a minimal ACPI table set instead. */
		printf("DomainU EFI build up:");

		dom_fw_fake_acpi(d, &tables->acpi_tables);

		tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
		tables->efi_tables[i].table =
			dom_pa((unsigned long) &tables->acpi_tables);
		printf(" ACPI 2.0=0x%lx",tables->efi_tables[i].table);
		i++;
		printf("\n");
	}

	/* fill in the SAL system table: */
	memcpy(tables->sal_systab.signature, "SST_", 4);
	tables->sal_systab.size = sizeof(tables->sal_systab);
	tables->sal_systab.sal_rev_minor = 1;
	tables->sal_systab.sal_rev_major = 0;
	tables->sal_systab.entry_count = 2;

	strcpy((char *)tables->sal_systab.oem_id, "Xen/ia64");
	strcpy((char *)tables->sal_systab.product_id, "Xen/ia64");

	/* fill in an entry point: */
	tables->sal_ed.type = SAL_DESC_ENTRY_POINT;
	tables->sal_ed.pal_proc = FW_HYPERCALL_PAL_CALL_PADDR;
	dom_fw_pal_hypercall_patch(d, tables->sal_ed.pal_proc);
	tables->sal_ed.sal_proc = FW_HYPERCALL_SAL_CALL_PADDR;
	dom_fw_hypercall_patch(d, tables->sal_ed.sal_proc,
	                       FW_HYPERCALL_SAL_CALL, 1);
	tables->sal_ed.gp = 0;	/* will be ignored */

	/* Fill an AP wakeup descriptor. */
	tables->sal_wakeup.type = SAL_DESC_AP_WAKEUP;
	tables->sal_wakeup.mechanism = IA64_SAL_AP_EXTERNAL_INT;
	tables->sal_wakeup.vector = XEN_SAL_BOOT_RENDEZ_VEC;

	/* Compute checksum over the SAL systab and its descriptors;
	   the bound depends on struct fw_tables field order. */
	checksum = 0;
	for (cp = (char *)&tables->sal_systab;
	     cp < (char *)&tables->fpswa_inf;
	     ++cp)
		checksum += *cp;
	tables->sal_systab.checksum = -checksum;

	/* SAL return point. */
	d->arch.sal_return_addr = FW_HYPERCALL_SAL_RETURN_PADDR;
	dom_fw_hypercall_patch (d, d->arch.sal_return_addr,
	                        FW_HYPERCALL_SAL_RETURN, 0);

	/* Fill in the FPSWA interface:
	   NOTE(review): fpswa_interface is dereferenced unchecked —
	   presumably guaranteed non-NULL at this point; confirm. */
	tables->fpswa_inf.revision = fpswa_interface->revision;
	dom_fpswa_hypercall_patch(d);
	tables->fpswa_inf.fpswa = (void *)FW_HYPERCALL_FPSWA_ENTRY_PADDR;

	i = 0; /* Used by MAKE_MD */

	/* hypercall patches live here, masquerade as reserved PAL memory */
	MAKE_MD(EFI_PAL_CODE,EFI_MEMORY_WB|EFI_MEMORY_RUNTIME,
	        FW_HYPERCALL_BASE_PADDR, FW_HYPERCALL_END_PADDR);

	/* Create dom0/domu md entry for fw tables area */
	MAKE_MD(EFI_RUNTIME_SERVICES_DATA, EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
	        dom_fw_base_mpa, dom_fw_end_mpa);

	if (d != dom0 || running_on_sim) {
		/* Synthetic map: one big conventional region + I/O ports. */
		/* Memory. */
		MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
		        dom_fw_end_mpa, maxmem);

		/* Create an entry for IO ports. */
		MAKE_MD(EFI_MEMORY_MAPPED_IO_PORT_SPACE, EFI_MEMORY_UC,
		        IO_PORTS_PADDR, IO_PORTS_PADDR + IO_PORTS_SIZE);

		num_mds = i;
	}
	else {
		/* pass through the I/O port space */
		struct dom0_passthrough_arg arg;
		u64 addr;
		int j;

		/* Fill from real entries. */
		arg.md = &tables->efi_memmap[i];
		arg.i = &i;
		arg.d = d;
		arg.flags = ASSIGN_writable;
		//XXX Is this needed?
		efi_memmap_walk_type(EFI_RUNTIME_SERVICES_CODE,
		                     dom_fw_dom0_passthrough, &arg);
		// for ACPI table.
		arg.flags = ASSIGN_readonly;
		efi_memmap_walk_type(EFI_RUNTIME_SERVICES_DATA,
		                     dom_fw_dom0_passthrough, &arg);
		arg.flags = ASSIGN_writable;
		efi_memmap_walk_type(EFI_ACPI_RECLAIM_MEMORY,
		                     dom_fw_dom0_passthrough, &arg);
		efi_memmap_walk_type(EFI_ACPI_MEMORY_NVS,
		                     dom_fw_dom0_passthrough, &arg);
		/* Some systems (HP Superdome) keep ACPI tables in
		   reserved memory, so pass it through too. */
		efi_memmap_walk_type(EFI_RESERVED_TYPE,
		                     dom_fw_dom0_passthrough, &arg);
		efi_memmap_walk_type(EFI_MEMORY_MAPPED_IO,
		                     dom_fw_dom0_passthrough, &arg);
		efi_memmap_walk_type(EFI_MEMORY_MAPPED_IO_PORT_SPACE,
		                     dom_fw_dom0_passthrough, &arg);
		efi_memmap_walk_type(EFI_CONVENTIONAL_MEMORY,
		                     dom_fw_dom0_lowmem, &arg);
		num_mds = i;

		sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
		     efi_mdt_cmp, NULL);

		// find gaps and fill them with conventional memory
		for (j = 0; j < num_mds; j++) {
			unsigned long end;
			unsigned long next_start;

			md = &tables->efi_memmap[j];
			end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

			if (j + 1 < num_mds) {
				efi_memory_desc_t* next_md;
				next_md = &tables->efi_memmap[j+1];
				next_start = next_md->phys_addr;

				/* Have just been sorted. */
				BUG_ON(end > next_start);

				/* No room for memory! */
				if (end == next_start)
					continue;

				if (next_start > maxmem)
					next_start = maxmem;
			}
			else
				next_start = maxmem;

			/* Avoid "legacy" low memory addresses and
			   the HYPERCALL area. */
			if (end < ONE_MB)
				end = ONE_MB;

			// clip the range and align to PAGE_SIZE
			next_start = next_start & PAGE_MASK;
			end = PAGE_ALIGN(end);

			/* No room for memory. */
			if (end >= next_start)
				continue;

			MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
			        end, next_start);

			if (next_start >= maxmem)
				break;
		}
		num_mds = i;
		BUG_ON(num_mds > NUM_MEM_DESCS);
		sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
		     efi_mdt_cmp, NULL);

		// dom0 doesn't need build_physmap_table()
		// see arch_set_info_guest()
		// instead we allocate pages manually.
		for (i = 0; i < num_mds; i++) {
			md = &tables->efi_memmap[i];
			if (md->phys_addr > maxmem)
				break;

			if (md->type == EFI_LOADER_DATA ||
			    md->type == EFI_PAL_CODE ||
			    md->type == EFI_CONVENTIONAL_MEMORY) {
				unsigned long start = md->phys_addr & PAGE_MASK;
				unsigned long end = md->phys_addr +
					(md->num_pages << EFI_PAGE_SHIFT);

				if (end == start) {
					// md->num_pages = 0 is allowed.
					continue;
				}
				if (end > (max_page << PAGE_SHIFT))
					end = (max_page << PAGE_SHIFT);

				for (addr = start;
				     addr < end;
				     addr += PAGE_SIZE) {
					assign_new_domain0_page(d, addr);
				}
			}
		}
		// Map low-memory holes & unmapped MMIO for legacy drivers
		for (addr = 0; addr < ONE_MB; addr += PAGE_SIZE) {
			if (domain_page_mapped(d, addr))
				continue;

			if (efi_mmio(addr, PAGE_SIZE))
				assign_domain_mmio_page(d, addr, PAGE_SIZE);
		}
	}

	/* Display memmap. */
	for (i = 0 ; i < num_mds; i++)
		print_md(&tables->efi_memmap[i]);

	/* Fill boot_param */
	bp->efi_systab = dom_pa((unsigned long)&tables->efi_systab);
	bp->efi_memmap = dom_pa((unsigned long)tables->efi_memmap);
	bp->efi_memmap_size = num_mds * sizeof(efi_memory_desc_t);
	bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
	bp->efi_memdesc_version = EFI_MEMDESC_VERSION;
	bp->command_line = 0;
	bp->console_info.num_cols = 80;
	bp->console_info.num_rows = 25;
	bp->console_info.orig_x = 0;
	bp->console_info.orig_y = 24;
	bp->fpswa = dom_pa((unsigned long) &tables->fpswa_inf);
}
/*
 * Entry point: build the emulated firmware for domain 'd'.
 * 'bp_mpa' is the guest physical address where the ia64_boot_param
 * should live; 'maxmem' bounds the guest's conventional memory.
 * Uses the file-scope imva_fw_base, so two domains cannot be set up
 * concurrently (see the note at its definition).
 */
void dom_fw_setup(struct domain *d, unsigned long bp_mpa, unsigned long maxmem)
{
	struct ia64_boot_param *bp;

	/* Note: 4KB < size < 8KB. */
	/* dom_pa() assumes the tables fit in one page. */
	BUILD_BUG_ON(sizeof(struct fw_tables) > PAGE_SIZE);

	dom_fw_end_mpa = PAGE_ALIGN(dom_fw_base_mpa + sizeof(struct fw_tables));

	/* Create page for hypercalls. */
	assign_new_domain_page_if_dom0(d, dom_fw_base_mpa);
	imva_fw_base = (unsigned long)domain_mpa_to_imva(d, dom_fw_base_mpa);

	/* Create page for boot_param. */
	assign_new_domain_page_if_dom0(d, bp_mpa);
	bp = domain_mpa_to_imva(d, bp_mpa);

	dom_fw_init(d, bp, (struct fw_tables *)imva_fw_base, maxmem);
}