direct-io.hg

view xen/arch/ia64/xen/dom_fw.c @ 12425:5c5af79e7272

[IA64] IA64 counter part of the change 12204:e6fdb32b786c of xen-unstable.hg

Remove xc_ia64_get_pfn_list() from setup_guest() in xc_linux_build.c,
use xc_domain_populate_physmap() and xc_domain_translate_gpfn_list().

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Fri Nov 10 11:14:32 2006 -0700 (2006-11-10)
parents 29b02d929b7e
children 05d227d81935
line source
1 /*
2 * Xen domain firmware emulation support
3 * Copyright (C) 2004 Hewlett-Packard Co.
4 * Dan Magenheimer (dan.magenheimer@hp.com)
5 *
6 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
7 * VA Linux Systems Japan K.K.
8 * dom0 vp model support
9 */
11 #include <xen/config.h>
12 #include <asm/system.h>
13 #include <asm/pgalloc.h>
15 #include <linux/efi.h>
16 #include <linux/sort.h>
17 #include <asm/io.h>
18 #include <asm/pal.h>
19 #include <asm/sal.h>
20 #include <asm/meminit.h>
21 #include <asm/fpswa.h>
22 #include <xen/version.h>
23 #include <xen/acpi.h>
24 #include <xen/errno.h>
26 #include <asm/dom_fw.h>
27 #include <asm/bundle.h>
/* 1 MiB; used when building/clipping sub-1MB memory descriptors below. */
29 #define ONE_MB (1UL << 20)
/* Non-zero when Xen runs under the HP-ski simulator (set elsewhere). */
31 extern unsigned long running_on_sim;
/* "Xen/ia64" as a NUL-padded UCS-2 string: the EFI firmware vendor name
   (\066 and \064 are octal for '6' and '4'). */
33 #define FW_VENDOR "X\0e\0n\0/\0i\0a\0\066\0\064\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
/*
 * Append one EFI memory descriptor covering [start, end) to
 * tables->efi_memmap.  Uses 'tables', 'md' and the running index 'i'
 * from the enclosing scope (dom_fw_init / complete_dom0_memmap).
 * Arguments are parenthesized so that arbitrary expressions can be
 * passed without precedence surprises (note: 'start' is still
 * evaluated twice, so avoid side-effecting arguments).
 */
#define MAKE_MD(typ, attr, start, end)					\
	do {								\
		md = tables->efi_memmap + i++;				\
		md->type = (typ);					\
		md->pad = 0;						\
		md->phys_addr = (start);				\
		md->virt_addr = 0;					\
		md->num_pages = ((end) - (start)) >> EFI_PAGE_SHIFT;	\
		md->attribute = (attr);					\
	} while (0)
/*
 * Install the EFI runtime service 'tgt' as a hypercall stub:
 *  - patch a hypercall bundle for FW_HYPERCALL_<call> into the
 *    hypercall page (via dom_efi_hypercall_patch),
 *  - point the efi_runtime function pointer at a two-word function
 *    descriptor {entry, gp=0} stored in tables->func_ptrs.
 * Uses 'd', 'tables', 'hypercalls_imva' and the running descriptor
 * index 'pfn' from the enclosing scope (dom_fw_init).
 */
46 #define EFI_HYPERCALL_PATCH(tgt, call) \
47 do { \
48 dom_efi_hypercall_patch(d, FW_HYPERCALL_##call##_PADDR, \
49 FW_HYPERCALL_##call, hypercalls_imva); \
50 /* Descriptor address. */ \
51 tables->efi_runtime.tgt = \
52 FW_FIELD_MPA(func_ptrs) + 8 * pfn; \
53 /* Descriptor. */ \
54 tables->func_ptrs[pfn++] = FW_HYPERCALL_##call##_PADDR; \
55 tables->func_ptrs[pfn++] = 0; \
56 } while (0)
58 /* allocate a page for fw
59 * guest_setup() @ libxc/xc_linux_build.c does for domU
60 */
61 static inline void
62 assign_new_domain_page_if_dom0(struct domain *d, unsigned long mpaddr)
63 {
64 if (d == dom0)
65 assign_new_domain0_page(d, mpaddr);
66 }
68 /**************************************************************************
69 Hypercall bundle creation
70 **************************************************************************/
/*
 * Emit one IA-64 instruction bundle at 'imva' that performs a Xen
 * hypercall:
 *   slot0: mov r2 = hypnum      (hypercall number, A5 immediate)
 *   slot1: break brkimm         (traps into the hypervisor)
 *   slot2: br.ret / br.cond     (return to caller, per 'ret')
 * The bundle is flushed from the cache with fc so the guest executes
 * the freshly written code.
 */
72 static void
73 build_hypercall_bundle(u64 *imva, u64 brkimm, u64 hypnum, u64 ret)
74 {
75 INST64_A5 slot0;
76 INST64_I19 slot1;
77 INST64_B4 slot2;
78 IA64_BUNDLE bundle;
80 // slot0: mov r2 = hypnum (immediate split across imm7b/imm9d/imm5c)
81 slot0.inst = 0;
82 slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
83 slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
84 slot0.imm5c = hypnum >> 16; slot0.s = 0;
85 // slot1: break brkimm
86 slot1.inst = 0;
87 slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
88 slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
89 // if ret slot2: br.ret.sptk.many rp
90 // else slot2: br.cond.sptk.many rp
91 slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
92 slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
93 if (ret) {
94 slot2.btype = 4; slot2.x6 = 0x21;
95 }
96 else {
97 slot2.btype = 0; slot2.x6 = 0x20;
98 }
/* Template 0x11 = MIB; slot1 straddles the two 64-bit words, hence
   the split into slot1a (low 18 bits) and slot1b. */
100 bundle.i64[0] = 0; bundle.i64[1] = 0;
101 bundle.template = 0x11;
102 bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
103 bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;
/* Write the bundle and flush both cache lines. */
105 imva[0] = bundle.i64[0]; imva[1] = bundle.i64[1];
106 ia64_fc(imva);
107 ia64_fc(imva + 1);
108 }
/*
 * Emit the two-bundle PAL hypercall stub at 'imva' by copying the
 * pal_call_stub template (xenasm.S) and patching in the hypercall
 * number (first bundle, A5 immediate) and the break immediate
 * 'brkimm' (second bundle, M37 break.m).  Bundles are flushed with fc.
 */
110 static void
111 build_pal_hypercall_bundles(u64 *imva, u64 brkimm, u64 hypnum)
112 {
113 extern unsigned long pal_call_stub[];
114 IA64_BUNDLE bundle;
115 INST64_A5 slot_a5;
116 INST64_M37 slot_m37;
118 /* The source of the hypercall stub is the pal_call_stub function
119 defined in xenasm.S. */
121 /* Copy the first bundle and patch the hypercall number. */
122 bundle.i64[0] = pal_call_stub[0];
123 bundle.i64[1] = pal_call_stub[1];
124 slot_a5.inst = bundle.slot0;
125 slot_a5.imm7b = hypnum;
126 slot_a5.imm9d = hypnum >> 7;
127 slot_a5.imm5c = hypnum >> 16;
128 bundle.slot0 = slot_a5.inst;
129 imva[0] = bundle.i64[0];
130 imva[1] = bundle.i64[1];
131 ia64_fc(imva);
132 ia64_fc(imva + 1);
134 /* Copy the second bundle and patch the hypercall vector. */
135 bundle.i64[0] = pal_call_stub[2];
136 bundle.i64[1] = pal_call_stub[3];
137 slot_m37.inst = bundle.slot0;
138 slot_m37.imm20a = brkimm;
139 slot_m37.i = brkimm >> 20;
140 bundle.slot0 = slot_m37.inst;
141 imva[2] = bundle.i64[0];
142 imva[3] = bundle.i64[1];
143 ia64_fc(imva + 2);
144 ia64_fc(imva + 3);
145 }
147 // builds a hypercall bundle at domain physical address
/*
 * Install the FPSWA entry point: write a two-word function descriptor
 * {patch_paddr, gp=0} at the FPSWA entry address, then patch a
 * returning (ret=1) FPSWA hypercall bundle at the patch address.
 * 'imva' is the hypervisor-mapped base of the hypercall page.
 */
148 static void
149 dom_fpswa_hypercall_patch(struct domain *d, unsigned long imva)
150 {
151 unsigned long *entry_imva, *patch_imva;
152 const unsigned long entry_paddr = FW_HYPERCALL_FPSWA_ENTRY_PADDR;
153 const unsigned long patch_paddr = FW_HYPERCALL_FPSWA_PATCH_PADDR;
155 entry_imva = (unsigned long *)(imva + entry_paddr -
156 FW_HYPERCALL_BASE_PADDR);
157 patch_imva = (unsigned long *)(imva + patch_paddr -
158 FW_HYPERCALL_BASE_PADDR);
160 /* Descriptor. */
161 *entry_imva++ = patch_paddr;
162 *entry_imva = 0;
164 build_hypercall_bundle(patch_imva, d->arch.breakimm,
165 FW_HYPERCALL_FPSWA, 1);
166 }
168 // builds a hypercall bundle at domain physical address
169 static void
170 dom_efi_hypercall_patch(struct domain *d, unsigned long paddr,
171 unsigned long hypercall, unsigned long imva)
172 {
173 build_hypercall_bundle((u64 *)(imva + paddr - FW_HYPERCALL_BASE_PADDR),
174 d->arch.breakimm, hypercall, 1);
175 }
177 // builds a hypercall bundle at domain physical address
178 static void
179 dom_fw_hypercall_patch(struct domain *d, unsigned long paddr,
180 unsigned long hypercall,unsigned long ret,
181 unsigned long imva)
182 {
183 build_hypercall_bundle((u64 *)(imva + paddr - FW_HYPERCALL_BASE_PADDR),
184 d->arch.breakimm, hypercall, ret);
185 }
187 static void
188 dom_fw_pal_hypercall_patch(struct domain *d, unsigned long paddr,
189 unsigned long imva)
190 {
191 build_pal_hypercall_bundles((u64*)(imva + paddr -
192 FW_HYPERCALL_BASE_PADDR),
193 d->arch.breakimm, FW_HYPERCALL_PAL_CALL);
194 }
196 static inline void
197 print_md(efi_memory_desc_t *md)
198 {
199 u64 size;
201 printk("dom mem: type=%2u, attr=0x%016lx, range=[0x%016lx-0x%016lx) ",
202 md->type, md->attribute, md->phys_addr,
203 md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT));
205 size = md->num_pages << EFI_PAGE_SHIFT;
206 if (size > ONE_MB)
207 printk ("(%luMB)\n", size >> 20);
208 else
209 printk ("(%luKB)\n", size >> 10);
210 }
/* Count of LSAPIC entries already assigned to dom0 VCPUs while the
   MADT is being rewritten (reset in touch_acpi_table). */
212 static u32 lsapic_nbr;
214 /* Modify lsapic table. Provides LPs. */
/*
 * MADT-walk callback (see touch_acpi_table): keep one enabled LSAPIC
 * entry per existing dom0 VCPU, renumbering ids sequentially and
 * forcing eid to 0; disable every further enabled entry so dom0 does
 * not try to start CPUs it has no VCPU for.  Returns 0, or -EINVAL on
 * a NULL header.
 */
215 static int
216 acpi_update_lsapic (acpi_table_entry_header *header, const unsigned long end)
217 {
218 struct acpi_table_lsapic *lsapic;
219 int enable;
221 lsapic = (struct acpi_table_lsapic *) header;
222 if (!lsapic)
223 return -EINVAL;
/* Another VCPU still to place? */
225 if (lsapic_nbr < MAX_VIRT_CPUS && dom0->vcpu[lsapic_nbr] != NULL)
226 enable = 1;
227 else
228 enable = 0;
229 if (lsapic->flags.enabled && enable) {
230 printk("enable lsapic entry: 0x%lx\n", (u64)lsapic);
231 lsapic->id = lsapic_nbr;
232 lsapic->eid = 0;
233 lsapic_nbr++;
234 } else if (lsapic->flags.enabled) {
235 printk("DISABLE lsapic entry: 0x%lx\n", (u64)lsapic);
236 lsapic->flags.enabled = 0;
237 lsapic->id = 0;
238 lsapic->eid = 0;
239 }
240 return 0;
241 }
243 static u8
244 generate_acpi_checksum(void *tbl, unsigned long len)
245 {
246 u8 *ptr, sum = 0;
248 for (ptr = tbl; len > 0 ; len--, ptr++)
249 sum += *ptr;
251 return 0 - sum;
252 }
254 static int
255 acpi_update_madt_checksum (unsigned long phys_addr, unsigned long size)
256 {
257 struct acpi_table_madt* acpi_madt;
259 if (!phys_addr || !size)
260 return -EINVAL;
262 acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
263 acpi_madt->header.checksum = 0;
264 acpi_madt->header.checksum = generate_acpi_checksum(acpi_madt, size);
266 return 0;
267 }
269 /* base is physical address of acpi table */
270 static void touch_acpi_table(void)
271 {
272 lsapic_nbr = 0;
273 if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_update_lsapic, 0) < 0)
274 printk("Error parsing MADT - no LAPIC entires\n");
275 acpi_table_parse(ACPI_APIC, acpi_update_madt_checksum);
277 return;
278 }
/*
 * Layout of the single page of synthetic ACPI tables handed to a domU
 * (built by dom_fw_fake_acpi, placed at FW_ACPI_BASE_PADDR).  The RSDP
 * points at the XSDT, which references the FADT (and, via madt_ptr,
 * the MADT); the FADT references the FACS and DSDT.
 */
280 struct fake_acpi_tables {
281 struct acpi20_table_rsdp rsdp;
282 struct xsdt_descriptor_rev2 xsdt;
/* Extra XSDT slot pointing at the MADT (see xsdt->length setup). */
283 u64 madt_ptr;
284 struct fadt_descriptor_rev2 fadt;
285 struct facs_descriptor_rev2 facs;
286 struct acpi_table_header dsdt;
/* AML payload of the DSDT: a _SB_ scope plus processor objects. */
287 u8 aml[8 + 11 * MAX_VIRT_CPUS];
288 struct acpi_table_madt madt;
289 struct acpi_table_lsapic lsapic[MAX_VIRT_CPUS];
/* Dummy register blocks backing the FADT X* addresses. */
290 u8 pm1a_evt_blk[4];
291 u8 pm1a_cnt_blk[1];
292 u8 pm_tmr_blk[4];
293 };
/*
 * Guest physical address of 'field' within the fake ACPI tables page.
 * Previously the expansion ended in a stray ';' and had no outer
 * parentheses; that only worked because every user was a plain
 * assignment statement.  Fixed to a proper parenthesized expression.
 */
#define ACPI_TABLE_MPA(field)						\
	(FW_ACPI_BASE_PADDR + offsetof(struct fake_acpi_tables, field))
297 /* Create enough of an ACPI structure to make the guest OS ACPI happy. */
/*
 * Fill 'tables' (one page at FW_ACPI_BASE_PADDR in the guest) with a
 * minimal but checksummed ACPI 2.0 tree for domU:
 *   RSDP -> XSDT -> FADT (-> FACS, DSDT) and MADT (via madt_ptr slot).
 * The DSDT carries a trivial _SB_ scope with processor objects; the
 * MADT advertises one enabled LSAPIC per existing VCPU of 'd'.
 * All addresses stored in the tables are guest physical (ACPI_TABLE_MPA).
 */
298 static void
299 dom_fw_fake_acpi(struct domain *d, struct fake_acpi_tables *tables)
300 {
301 struct acpi20_table_rsdp *rsdp = &tables->rsdp;
302 struct xsdt_descriptor_rev2 *xsdt = &tables->xsdt;
303 struct fadt_descriptor_rev2 *fadt = &tables->fadt;
304 struct facs_descriptor_rev2 *facs = &tables->facs;
305 struct acpi_table_header *dsdt = &tables->dsdt;
306 struct acpi_table_madt *madt = &tables->madt;
307 struct acpi_table_lsapic *lsapic = tables->lsapic;
308 int i;
309 int aml_len;
310 int nbr_cpus;
312 memset(tables, 0, sizeof(struct fake_acpi_tables));
314 /* setup XSDT (64bit version of RSDT) */
315 strncpy(xsdt->signature, XSDT_SIG, 4);
316 /* XSDT points to both the FADT and the MADT, so add one entry */
317 xsdt->length = sizeof(struct xsdt_descriptor_rev2) + sizeof(u64);
318 xsdt->revision = 1;
319 strcpy(xsdt->oem_id, "XEN");
320 strcpy(xsdt->oem_table_id, "Xen/ia64");
321 strcpy(xsdt->asl_compiler_id, "XEN");
322 xsdt->asl_compiler_revision = (xen_major_version() << 16) |
323 xen_minor_version();
/* The second XSDT entry is the adjacent madt_ptr field of the struct. */
325 xsdt->table_offset_entry[0] = ACPI_TABLE_MPA(fadt);
326 tables->madt_ptr = ACPI_TABLE_MPA(madt);
328 xsdt->checksum = generate_acpi_checksum(xsdt, xsdt->length);
330 /* setup FADT */
331 strncpy(fadt->signature, FADT_SIG, 4);
332 fadt->length = sizeof(struct fadt_descriptor_rev2);
333 fadt->revision = FADT2_REVISION_ID;
334 strcpy(fadt->oem_id, "XEN");
335 strcpy(fadt->oem_table_id, "Xen/ia64");
336 strcpy(fadt->asl_compiler_id, "XEN");
337 fadt->asl_compiler_revision = (xen_major_version() << 16) |
338 xen_minor_version();
340 strncpy(facs->signature, FACS_SIG, 4);
341 facs->version = 1;
342 facs->length = sizeof(struct facs_descriptor_rev2);
344 fadt->xfirmware_ctrl = ACPI_TABLE_MPA(facs);
345 fadt->Xdsdt = ACPI_TABLE_MPA(dsdt);
347 /*
348 * All of the below FADT entries are filled in to prevent warnings
349 * from sanity checks in the ACPI CA. Emulate required ACPI hardware
350 * registers in system memory.
351 */
352 fadt->pm1_evt_len = 4;
353 fadt->xpm1a_evt_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
354 fadt->xpm1a_evt_blk.register_bit_width = 8;
355 fadt->xpm1a_evt_blk.address = ACPI_TABLE_MPA(pm1a_evt_blk);
356 fadt->pm1_cnt_len = 1;
357 fadt->xpm1a_cnt_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
358 fadt->xpm1a_cnt_blk.register_bit_width = 8;
359 fadt->xpm1a_cnt_blk.address = ACPI_TABLE_MPA(pm1a_cnt_blk);
360 fadt->pm_tm_len = 4;
361 fadt->xpm_tmr_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
362 fadt->xpm_tmr_blk.register_bit_width = 8;
363 fadt->xpm_tmr_blk.address = ACPI_TABLE_MPA(pm_tmr_blk);
365 fadt->checksum = generate_acpi_checksum(fadt, fadt->length);
367 /* setup RSDP */
368 strncpy(rsdp->signature, RSDP_SIG, 8);
369 strcpy(rsdp->oem_id, "XEN");
370 rsdp->revision = 2; /* ACPI 2.0 includes XSDT */
371 rsdp->length = sizeof(struct acpi20_table_rsdp);
372 rsdp->xsdt_address = ACPI_TABLE_MPA(xsdt);
/* ACPI 1.0 checksum covers the first 20 bytes; ext_checksum covers all. */
374 rsdp->checksum = generate_acpi_checksum(rsdp,
375 ACPI_RSDP_CHECKSUM_LENGTH);
376 rsdp->ext_checksum = generate_acpi_checksum(rsdp, rsdp->length);
378 /* setup DSDT with trivial namespace. */
379 strncpy(dsdt->signature, DSDT_SIG, 4);
380 dsdt->revision = 1;
381 strcpy(dsdt->oem_id, "XEN");
382 strcpy(dsdt->oem_table_id, "Xen/ia64");
383 strcpy(dsdt->asl_compiler_id, "XEN");
384 dsdt->asl_compiler_revision = (xen_major_version() << 16) |
385 xen_minor_version();
387 /* Trivial namespace, avoids ACPI CA complaints */
388 tables->aml[0] = 0x10; /* Scope */
389 tables->aml[1] = 0x40; /* length/offset to next object (patched) */
390 tables->aml[2] = 0x00;
391 strncpy((char *)&tables->aml[3], "_SB_", 4);
393 /* The processor object isn't absolutely necessary, revisit for SMP */
/* NOTE(review): only 3 processor objects are emitted, and the
   'i < 16' guard below is therefore always true — presumably leftover
   from a larger loop bound; confirm before raising the CPU count. */
394 aml_len = 7;
395 for (i = 0; i < 3; i++) {
396 unsigned char *p = tables->aml + aml_len;
397 p[0] = 0x5b; /* processor object */
398 p[1] = 0x83;
399 p[2] = 0x0b; /* next */
400 p[3] = 'C';
401 p[4] = 'P';
/* Writes two hex digits + NUL at p[5..7]; p[5]/p[7] overwritten below. */
402 snprintf ((char *)p + 5, 3, "%02x", i);
403 if (i < 16)
404 p[5] = 'U';
405 p[7] = i; /* acpi_id */
406 p[8] = 0; /* pblk_addr */
407 p[9] = 0;
408 p[10] = 0;
409 p[11] = 0;
410 p[12] = 0; /* pblk_len */
411 aml_len += 13;
412 }
/* Patch the Scope PkgLength (two-byte encoding: 0x4n low nibble + byte). */
413 tables->aml[1] = 0x40 + ((aml_len - 1) & 0x0f);
414 tables->aml[2] = (aml_len - 1) >> 4;
415 dsdt->length = sizeof(struct acpi_table_header) + aml_len;
416 dsdt->checksum = generate_acpi_checksum(dsdt, dsdt->length);
418 /* setup MADT */
419 strncpy(madt->header.signature, APIC_SIG, 4);
420 madt->header.revision = 2;
421 strcpy(madt->header.oem_id, "XEN");
422 strcpy(madt->header.oem_table_id, "Xen/ia64");
423 strcpy(madt->header.asl_compiler_id, "XEN");
424 madt->header.asl_compiler_revision = (xen_major_version() << 16) |
425 xen_minor_version();
427 /* An LSAPIC entry describes a CPU. */
428 nbr_cpus = 0;
429 for (i = 0; i < MAX_VIRT_CPUS; i++) {
430 lsapic[i].header.type = ACPI_MADT_LSAPIC;
431 lsapic[i].header.length = sizeof(struct acpi_table_lsapic);
432 lsapic[i].acpi_id = i;
433 lsapic[i].id = i;
434 lsapic[i].eid = 0;
435 if (d->vcpu[i] != NULL) {
436 lsapic[i].flags.enabled = 1;
437 nbr_cpus++;
438 }
439 }
/* NOTE(review): length counts only nbr_cpus entries, but disabled
   entries may be interleaved among enabled ones if VCPUs are sparse —
   assumes d->vcpu[] is populated densely from index 0; confirm. */
440 madt->header.length = sizeof(struct acpi_table_madt) +
441 nbr_cpus * sizeof(struct acpi_table_lsapic);
442 madt->header.checksum = generate_acpi_checksum(madt,
443 madt->header.length);
444 return;
445 }
447 static int
448 efi_mdt_cmp(const void *a, const void *b)
449 {
450 const efi_memory_desc_t *x = a, *y = b;
452 if (x->phys_addr > y->phys_addr)
453 return 1;
454 if (x->phys_addr < y->phys_addr)
455 return -1;
457 // num_pages == 0 is allowed.
458 if (x->num_pages > y->num_pages)
459 return 1;
460 if (x->num_pages < y->num_pages)
461 return -1;
463 return 0;
464 }
/* Number of EFI runtime-service descriptors in func_ptrs (2 words each). */
466 #define NFUNCPTRS 16
467 #define NUM_EFI_SYS_TABLES 6
468 #define NUM_MEM_DESCS 64 //large enough
/*
 * Layout of the firmware tables page handed to the guest at
 * FW_TABLES_BASE_PADDR (filled in by dom_fw_init).
 */
470 struct fw_tables {
471 efi_system_table_t efi_systab;
472 efi_runtime_services_t efi_runtime;
473 efi_config_table_t efi_tables[NUM_EFI_SYS_TABLES];
/* SAL system table followed by its descriptors; the SAL checksum in
   dom_fw_init runs from sal_systab up to fpswa_inf. */
475 struct ia64_sal_systab sal_systab;
476 struct ia64_sal_desc_entry_point sal_ed;
477 struct ia64_sal_desc_ap_wakeup sal_wakeup;
478 /* End of SAL descriptors. Do not forget to update checksum bound. */
480 fpswa_interface_t fpswa_inf;
481 efi_memory_desc_t efi_memmap[NUM_MEM_DESCS];
482 unsigned long func_ptrs[2*NFUNCPTRS];
483 struct xen_sal_data sal_data;
484 unsigned char fw_vendor[sizeof(FW_VENDOR)];
485 };
/* Guest physical address of 'field' within the fw_tables page.
   NOTE(review): expansion is not parenthesized; current users are all
   simple assignments or additions, so precedence is not an issue. */
486 #define FW_FIELD_MPA(field) \
487 FW_TABLES_BASE_PADDR + offsetof(struct fw_tables, field)
489 /* Complete the dom0 memmap. */
/*
 * Finish dom0's EFI memory map in tables->efi_memmap, which already
 * holds 'num_mds' entries built by dom_fw_init.  Phases:
 *   1. copy the interesting entries of the real machine MDT (mapping
 *      runtime/ACPI/MMIO ranges into dom0 as a side effect);
 *   2. sort, then fill the gaps between entries with conventional
 *      memory up to 'maxmem', and sort again;
 *   3. allocate dom0 pages backing the memory descriptors;
 *   4. map remaining low-memory MMIO holes for legacy drivers.
 * Returns the final descriptor count (<= NUM_MEM_DESCS, BUG otherwise).
 */
490 static int
491 complete_dom0_memmap(struct domain *d,
492 struct fw_tables *tables,
493 unsigned long maxmem,
494 int num_mds)
495 {
496 efi_memory_desc_t *md;
497 u64 addr;
498 int j;
499 void *efi_map_start, *efi_map_end, *p;
500 u64 efi_desc_size;
501 int i;
503 /* Walk through all MDT entries.
504 Copy all interesting entries. */
505 efi_map_start = __va(ia64_boot_param->efi_memmap);
506 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
507 efi_desc_size = ia64_boot_param->efi_memdesc_size;
509 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
510 const efi_memory_desc_t *md = p;
511 efi_memory_desc_t *dom_md = &tables->efi_memmap[num_mds];
512 u64 start = md->phys_addr;
513 u64 size = md->num_pages << EFI_PAGE_SHIFT;
514 u64 end = start + size;
516 switch (md->type) {
517 case EFI_RUNTIME_SERVICES_CODE:
518 case EFI_RUNTIME_SERVICES_DATA:
519 case EFI_ACPI_RECLAIM_MEMORY:
520 case EFI_ACPI_MEMORY_NVS:
521 case EFI_RESERVED_TYPE:
522 /* Map into dom0 - All these are writable. */
523 assign_domain_mach_page(d, start, size,
524 ASSIGN_writable);
525 /* Fall-through. */
526 case EFI_MEMORY_MAPPED_IO:
527 /* Will be mapped with ioremap. */
528 /* Copy descriptor. */
529 *dom_md = *md;
530 dom_md->virt_addr = 0;
531 num_mds++;
532 break;
534 case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
535 /* Map into dom0. */
536 assign_domain_mmio_page(d, start, size);
537 /* Copy descriptor. */
538 *dom_md = *md;
539 dom_md->virt_addr = 0;
540 num_mds++;
541 break;
543 case EFI_CONVENTIONAL_MEMORY:
544 case EFI_LOADER_CODE:
545 case EFI_LOADER_DATA:
546 case EFI_BOOT_SERVICES_CODE:
547 case EFI_BOOT_SERVICES_DATA:
548 /* Create dom0 MDT entries for conventional memory
549 below 1MB. Without this Linux will assume VGA is
550 present because 0xA0000 will always be either a hole
551 in the MDT or an I/O region via the passthrough. */
553 end = min(ONE_MB, end);
555 /* Avoid firmware and hypercall area.
556 We know they are 0-based. */
557 if (end < FW_END_PADDR || start >= ONE_MB)
558 break;
559 if (start < FW_END_PADDR)
560 start = FW_END_PADDR;
562 dom_md->type = EFI_CONVENTIONAL_MEMORY;
563 dom_md->phys_addr = start;
564 dom_md->virt_addr = 0;
565 dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
566 dom_md->attribute = md->attribute;
567 num_mds++;
568 break;
570 case EFI_UNUSABLE_MEMORY:
571 case EFI_PAL_CODE:
572 /* Discard. */
573 break;
575 default:
576 /* Print a warning but continue. */
577 printk("complete_dom0_memmap: warning: "
578 "unhandled MDT entry type %u\n", md->type);
579 }
580 }
581 BUG_ON(num_mds > NUM_MEM_DESCS);
583 sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
584 efi_mdt_cmp, NULL);
586 /* find gaps and fill them with conventional memory */
/* 'i' appends new descriptors while 'j' walks the sorted existing ones. */
587 i = num_mds;
588 for (j = 0; j < num_mds; j++) {
589 unsigned long end;
590 unsigned long next_start;
592 md = &tables->efi_memmap[j];
593 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
595 if (j + 1 < num_mds) {
596 efi_memory_desc_t* next_md;
597 next_md = &tables->efi_memmap[j + 1];
598 next_start = next_md->phys_addr;
600 /* Have just been sorted. */
601 BUG_ON(end > next_start);
603 /* No room for memory! */
604 if (end == next_start)
605 continue;
607 if (next_start > maxmem)
608 next_start = maxmem;
609 }
610 else
611 next_start = maxmem;
613 /* Avoid "legacy" low memory addresses
614 and the HYPERCALL area. */
615 if (end < ONE_MB)
616 end = ONE_MB;
618 // clip the range and align to PAGE_SIZE
619 next_start = next_start & PAGE_MASK;
620 end = PAGE_ALIGN(end);
622 /* No room for memory. */
623 if (end >= next_start)
624 continue;
626 MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
627 end, next_start);
629 if (next_start >= maxmem)
630 break;
631 }
632 num_mds = i;
633 BUG_ON(num_mds > NUM_MEM_DESCS);
634 sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
635 efi_mdt_cmp, NULL);
637 /* setup_guest() @ libxc/xc_linux_build() arranges memory for domU.
638 * however no one arranges memory for dom0,
639 * instead we allocate pages manually.
640 */
641 for (i = 0; i < num_mds; i++) {
642 md = &tables->efi_memmap[i];
643 if (md->phys_addr > maxmem)
644 break;
646 if (md->type == EFI_LOADER_DATA ||
647 md->type == EFI_PAL_CODE ||
648 md->type == EFI_CONVENTIONAL_MEMORY) {
649 unsigned long start = md->phys_addr & PAGE_MASK;
650 unsigned long end = md->phys_addr +
651 (md->num_pages << EFI_PAGE_SHIFT);
653 if (end == start) {
654 /* md->num_pages = 0 is allowed. */
655 continue;
656 }
657 if (end > (max_page << PAGE_SHIFT))
658 end = (max_page << PAGE_SHIFT);
660 for (addr = start; addr < end; addr += PAGE_SIZE)
661 assign_new_domain0_page(d, addr);
662 }
663 }
664 // Map low-memory holes & unmapped MMIO for legacy drivers
665 for (addr = 0; addr < ONE_MB; addr += PAGE_SIZE) {
666 if (domain_page_mapped(d, addr))
667 continue;
669 if (efi_mmio(addr, PAGE_SIZE))
670 assign_domain_mmio_page(d, addr, PAGE_SIZE);
671 }
672 return num_mds;
673 }
/*
 * Build all emulated firmware tables for domain 'd' in 'tables' (the
 * page at FW_TABLES_BASE_PADDR): EFI system/runtime tables with
 * hypercall-backed runtime services, EFI config table passthrough (dom0)
 * or the fake ACPI pointer (domU), the SAL system table with PAL/SAL
 * hypercall stubs, the FPSWA interface, and the EFI memory map.  Also
 * fills in the guest's ia64_boot_param 'bp'.  'hypercalls_imva' is the
 * hypervisor mapping of the hypercall page; 'maxmem' bounds the map.
 * Note: the EFI_HYPERCALL_PATCH and MAKE_MD macros advance the local
 * counters 'pfn' and 'i' as side effects.
 */
675 static void
676 dom_fw_init(struct domain *d,
677 struct ia64_boot_param *bp,
678 struct fw_tables *tables,
679 unsigned long hypercalls_imva,
680 unsigned long maxmem)
681 {
682 efi_memory_desc_t *md;
683 unsigned long pfn;
684 unsigned char checksum;
685 char *cp;
686 int num_mds, i;
688 memset(tables, 0, sizeof(struct fw_tables));
690 /* Initialise for EFI_SET_VIRTUAL_ADDRESS_MAP emulation */
691 d->arch.efi_runtime = &tables->efi_runtime;
692 d->arch.fpswa_inf = &tables->fpswa_inf;
693 d->arch.sal_data = &tables->sal_data;
695 /* EFI systab. */
696 tables->efi_systab.hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
697 tables->efi_systab.hdr.revision = EFI_SYSTEM_TABLE_REVISION;
698 tables->efi_systab.hdr.headersize = sizeof(tables->efi_systab.hdr);
700 memcpy(tables->fw_vendor,FW_VENDOR,sizeof(FW_VENDOR));
701 tables->efi_systab.fw_vendor = FW_FIELD_MPA(fw_vendor);
702 tables->efi_systab.fw_revision = 1;
703 tables->efi_systab.runtime = (void *)FW_FIELD_MPA(efi_runtime);
704 tables->efi_systab.nr_tables = NUM_EFI_SYS_TABLES;
705 tables->efi_systab.tables = FW_FIELD_MPA(efi_tables);
707 /* EFI runtime. */
708 tables->efi_runtime.hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
709 tables->efi_runtime.hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
710 tables->efi_runtime.hdr.headersize = sizeof(tables->efi_runtime.hdr);
/* Each patch consumes two func_ptrs slots and bumps 'pfn'. */
712 pfn = 0;
713 EFI_HYPERCALL_PATCH(get_time,EFI_GET_TIME);
714 EFI_HYPERCALL_PATCH(set_time,EFI_SET_TIME);
715 EFI_HYPERCALL_PATCH(get_wakeup_time,EFI_GET_WAKEUP_TIME);
716 EFI_HYPERCALL_PATCH(set_wakeup_time,EFI_SET_WAKEUP_TIME);
717 EFI_HYPERCALL_PATCH(set_virtual_address_map,
718 EFI_SET_VIRTUAL_ADDRESS_MAP);
719 EFI_HYPERCALL_PATCH(get_variable,EFI_GET_VARIABLE);
720 EFI_HYPERCALL_PATCH(get_next_variable,EFI_GET_NEXT_VARIABLE);
721 EFI_HYPERCALL_PATCH(set_variable,EFI_SET_VARIABLE);
722 EFI_HYPERCALL_PATCH(get_next_high_mono_count,
723 EFI_GET_NEXT_HIGH_MONO_COUNT);
724 EFI_HYPERCALL_PATCH(reset_system,EFI_RESET_SYSTEM);
726 /* System tables. */
727 tables->efi_tables[0].guid = SAL_SYSTEM_TABLE_GUID;
728 tables->efi_tables[0].table = FW_FIELD_MPA(sal_systab);
729 for (i = 1; i < NUM_EFI_SYS_TABLES; i++) {
730 tables->efi_tables[i].guid = NULL_GUID;
731 tables->efi_tables[i].table = 0;
732 }
/* dom0 gets the real machine's config tables passed through;
   domU gets only the fake ACPI 2.0 table built by dom_fw_fake_acpi. */
733 i = 1;
734 if (d == dom0) {
735 /* Write messages to the console. */
736 touch_acpi_table();
738 printk("Domain0 EFI passthrough:");
739 if (efi.mps) {
740 tables->efi_tables[i].guid = MPS_TABLE_GUID;
741 tables->efi_tables[i].table = __pa(efi.mps);
742 printk(" MPS=0x%lx",tables->efi_tables[i].table);
743 i++;
744 }
746 if (efi.acpi20) {
747 tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
748 tables->efi_tables[i].table = __pa(efi.acpi20);
749 printk(" ACPI 2.0=0x%lx",tables->efi_tables[i].table);
750 i++;
751 }
752 if (efi.acpi) {
753 tables->efi_tables[i].guid = ACPI_TABLE_GUID;
754 tables->efi_tables[i].table = __pa(efi.acpi);
755 printk(" ACPI=0x%lx",tables->efi_tables[i].table);
756 i++;
757 }
758 if (efi.smbios) {
759 tables->efi_tables[i].guid = SMBIOS_TABLE_GUID;
760 tables->efi_tables[i].table = __pa(efi.smbios);
761 printk(" SMBIOS=0x%lx",tables->efi_tables[i].table);
762 i++;
763 }
764 if (efi.hcdp) {
765 tables->efi_tables[i].guid = HCDP_TABLE_GUID;
766 tables->efi_tables[i].table = __pa(efi.hcdp);
767 printk(" HCDP=0x%lx",tables->efi_tables[i].table);
768 i++;
769 }
770 printk("\n");
771 } else {
772 printk("DomainU EFI build up:");
774 tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
775 tables->efi_tables[i].table = FW_ACPI_BASE_PADDR;
776 printk(" ACPI 2.0=0x%lx",tables->efi_tables[i].table);
777 i++;
778 printk("\n");
779 }
781 /* fill in the SAL system table: */
782 memcpy(tables->sal_systab.signature, "SST_", 4);
783 tables->sal_systab.size = sizeof(tables->sal_systab);
784 tables->sal_systab.sal_rev_minor = 1;
785 tables->sal_systab.sal_rev_major = 0;
786 tables->sal_systab.entry_count = 2;
788 strcpy((char *)tables->sal_systab.oem_id, "Xen/ia64");
789 strcpy((char *)tables->sal_systab.product_id, "Xen/ia64");
791 /* PAL entry point: */
792 tables->sal_ed.type = SAL_DESC_ENTRY_POINT;
793 tables->sal_ed.pal_proc = FW_HYPERCALL_PAL_CALL_PADDR;
794 dom_fw_pal_hypercall_patch(d, tables->sal_ed.pal_proc,
795 hypercalls_imva);
796 /* SAL entry point. */
797 tables->sal_ed.sal_proc = FW_HYPERCALL_SAL_CALL_PADDR;
798 dom_fw_hypercall_patch(d, tables->sal_ed.sal_proc,
799 FW_HYPERCALL_SAL_CALL, 1, hypercalls_imva);
800 tables->sal_ed.gp = 0; /* will be ignored */
802 /* Fill an AP wakeup descriptor. */
803 tables->sal_wakeup.type = SAL_DESC_AP_WAKEUP;
804 tables->sal_wakeup.mechanism = IA64_SAL_AP_EXTERNAL_INT;
805 tables->sal_wakeup.vector = XEN_SAL_BOOT_RENDEZ_VEC;
807 /* Compute checksum. */
/* Byte sum over [sal_systab, fpswa_inf) must be zero — keep the upper
   bound in sync with the struct fw_tables layout. */
808 checksum = 0;
809 for (cp = (char *)&tables->sal_systab;
810 cp < (char *)&tables->fpswa_inf;
811 ++cp)
812 checksum += *cp;
813 tables->sal_systab.checksum = -checksum;
815 /* SAL return point. */
816 dom_fw_hypercall_patch(d, FW_HYPERCALL_SAL_RETURN_PADDR,
817 FW_HYPERCALL_SAL_RETURN, 0, hypercalls_imva);
819 /* Fill in the FPSWA interface: */
820 if (fpswa_interface) {
821 tables->fpswa_inf.revision = fpswa_interface->revision;
822 dom_fpswa_hypercall_patch(d, hypercalls_imva);
823 tables->fpswa_inf.fpswa =
824 (void *)FW_HYPERCALL_FPSWA_ENTRY_PADDR;
825 }
827 i = 0; /* Used by MAKE_MD */
829 /* hypercall patches live here, masquerade as reserved PAL memory */
830 MAKE_MD(EFI_PAL_CODE,EFI_MEMORY_WB|EFI_MEMORY_RUNTIME,
831 FW_HYPERCALL_BASE_PADDR, FW_HYPERCALL_END_PADDR);
833 /* Create dom0/domu md entry for fw and acpi tables area. */
834 MAKE_MD(EFI_ACPI_MEMORY_NVS, EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
835 FW_ACPI_BASE_PADDR, FW_ACPI_END_PADDR);
836 MAKE_MD(EFI_RUNTIME_SERVICES_DATA, EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
837 FW_TABLES_BASE_PADDR, FW_TABLES_END_PADDR);
839 if (d != dom0 || running_on_sim) {
840 /* DomU (or hp-ski).
841 Create a continuous memory area. */
842 /* Memory. */
843 MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
844 FW_END_PADDR, maxmem);
846 /* Create an entry for IO ports. */
847 MAKE_MD(EFI_MEMORY_MAPPED_IO_PORT_SPACE, EFI_MEMORY_UC,
848 IO_PORTS_PADDR, IO_PORTS_PADDR + IO_PORTS_SIZE);
850 num_mds = i;
851 }
852 else {
853 /* Dom0.
854 We must preserve ACPI data from real machine,
855 as well as IO areas. */
856 num_mds = complete_dom0_memmap(d, tables, maxmem, i);
857 }
859 /* Display memmap. */
860 for (i = 0 ; i < num_mds; i++)
861 print_md(&tables->efi_memmap[i]);
863 /* Fill boot_param */
864 bp->efi_systab = FW_FIELD_MPA(efi_systab);
865 bp->efi_memmap = FW_FIELD_MPA(efi_memmap);
866 bp->efi_memmap_size = num_mds * sizeof(efi_memory_desc_t);
867 bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
868 bp->efi_memdesc_version = EFI_MEMDESC_VERSION;
869 bp->command_line = 0;
870 bp->console_info.num_cols = 80;
871 bp->console_info.num_rows = 25;
872 bp->console_info.orig_x = 0;
873 bp->console_info.orig_y = 24;
874 if (fpswa_interface)
875 bp->fpswa = FW_FIELD_MPA(fpswa_inf);
876 }
878 void dom_fw_setup(struct domain *d, unsigned long bp_mpa, unsigned long maxmem)
879 {
880 struct ia64_boot_param *bp;
881 unsigned long imva_tables_base;
882 unsigned long imva_hypercall_base;
884 BUILD_BUG_ON(sizeof(struct fw_tables) >
885 (FW_TABLES_END_PADDR - FW_TABLES_BASE_PADDR));
887 BUILD_BUG_ON(sizeof(struct fake_acpi_tables) >
888 (FW_ACPI_END_PADDR - FW_ACPI_BASE_PADDR));
890 /* Create page for hypercalls. */
891 assign_new_domain_page_if_dom0(d, FW_HYPERCALL_BASE_PADDR);
892 imva_hypercall_base = (unsigned long)domain_mpa_to_imva
893 (d, FW_HYPERCALL_BASE_PADDR);
895 /* Create page for acpi tables. */
896 if (d != dom0) {
897 void *imva;
899 assign_new_domain_page_if_dom0(d, FW_ACPI_BASE_PADDR);
900 imva = domain_mpa_to_imva (d, FW_ACPI_BASE_PADDR);
901 dom_fw_fake_acpi(d, (struct fake_acpi_tables *)imva);
902 }
904 /* Create page for FW tables. */
905 assign_new_domain_page_if_dom0(d, FW_TABLES_BASE_PADDR);
906 imva_tables_base = (unsigned long)domain_mpa_to_imva
907 (d, FW_TABLES_BASE_PADDR);
909 /* Create page for boot_param. */
910 assign_new_domain_page_if_dom0(d, bp_mpa);
911 bp = domain_mpa_to_imva(d, bp_mpa);
913 dom_fw_init(d, bp, (struct fw_tables *)imva_tables_base,
914 imva_hypercall_base, maxmem);
915 }