ia64/xen-unstable

view tools/libxc/powerpc64/xc_linux_build.c @ 14240:4c08045ff57c

[POWERPC][XEN][LIBXC] Make xc_linux_build() use populate_physmap()
- populate_physmap() is the only way to invoke
guest_physmap_{add/remove}_page(), which populate our new p2m table.
- To use it, we must specify an array of PFNs where the new MFNs will be
mapped.
- Split out alloc_memory() from xc_linux_build().
- Fix memory free path in xc_linux_build().
Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Hollis Blanchard <hollisb@us.ibm.com>
date Fri Mar 02 17:08:04 2007 -0600 (2007-03-02)
parents 6b42b8c08731
children
line source
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright IBM Corporation 2006, 2007
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 * Ryan Harper <ryanh@us.ibm.com>
20 */
22 #include <stdio.h>
23 #include <stdint.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <fcntl.h>
28 #include <sys/types.h>
29 #include <inttypes.h>
31 #include <xen/xen.h>
32 #include <xen/memory.h>
33 #include <xc_private.h>
34 #include <xg_private.h>
35 #include <xenctrl.h>
37 #include "flatdevtree_env.h"
38 #include "flatdevtree.h"
39 #include "utils.h"
40 #include "mk_flatdevtree.h"
42 /* Use 16MB extents to match PowerPC's large page size. */
43 #define EXTENT_SHIFT 24
44 #define EXTENT_ORDER (EXTENT_SHIFT - PAGE_SHIFT)
46 #define INITRD_ADDR (24UL << 20)
47 #define DEVTREE_ADDR (16UL << 20)
49 static int init_boot_vcpu(
50 int xc_handle,
51 int domid,
52 struct domain_setup_info *dsi,
53 unsigned long devtree_addr,
54 unsigned long kern_addr)
55 {
56 vcpu_guest_context_t ctxt;
57 int rc;
59 memset(&ctxt.user_regs, 0x55, sizeof(ctxt.user_regs));
60 ctxt.user_regs.pc = dsi->v_kernentry;
61 ctxt.user_regs.msr = 0;
62 ctxt.user_regs.gprs[1] = 0; /* Linux uses its own stack */
63 ctxt.user_regs.gprs[3] = devtree_addr;
64 ctxt.user_regs.gprs[4] = kern_addr;
65 ctxt.user_regs.gprs[5] = 0;
66 /* There is a buggy kernel that does not zero the "local_paca", so
67 * we must make sure this register is 0 */
68 ctxt.user_regs.gprs[13] = 0;
70 DPRINTF("xc_vcpu_setvcpucontext:\n"
71 " pc 0x%016"PRIx64", msr 0x%016"PRIx64"\n"
72 " r1-5 %016"PRIx64" %016"PRIx64" %016"PRIx64" %016"PRIx64
73 " %016"PRIx64"\n",
74 ctxt.user_regs.pc, ctxt.user_regs.msr,
75 ctxt.user_regs.gprs[1],
76 ctxt.user_regs.gprs[2],
77 ctxt.user_regs.gprs[3],
78 ctxt.user_regs.gprs[4],
79 ctxt.user_regs.gprs[5]);
80 rc = xc_vcpu_setcontext(xc_handle, domid, 0, &ctxt);
81 if (rc < 0)
82 perror("setdomaininfo");
84 return rc;
85 }
87 static int load_initrd(
88 int xc_handle,
89 int domid,
90 xen_pfn_t *page_array,
91 const char *initrd_path,
92 unsigned long *base,
93 unsigned long *len)
94 {
95 uint8_t *initrd_img;
96 int rc = -1;
98 /* load the initrd file */
99 initrd_img = load_file(initrd_path, len);
100 if (initrd_img == NULL)
101 return -1;
103 DPRINTF("copying initrd to 0x%lx[0x%lx]\n", INITRD_ADDR, *len);
104 if (install_image(xc_handle, domid, page_array, initrd_img, INITRD_ADDR,
105 *len))
106 goto out;
108 *base = INITRD_ADDR;
109 rc = 0;
111 out:
112 free(initrd_img);
113 return rc;
114 }
/* Release the PFN->MFN array allocated by get_rma_page_array().  free(NULL)
 * is a no-op, so this is safe on error paths taken before the array was
 * allocated. */
static void free_page_array(xen_pfn_t *page_array)
{
    free(page_array);
}
121 static int check_memory_config(int rma_log, unsigned int mem_mb)
122 {
123 u64 mem_kb = (mem_mb << 10);
124 u64 rma_kb = (1 << rma_log) >> 10;
126 switch(rma_log)
127 {
128 case 26:
129 case 27:
130 case 28:
131 case 30:
132 case 34:
133 case 38:
134 if (mem_kb < rma_kb) {
135 DPRINTF("Domain memory must be at least %dMB\n",
136 (1 << rma_log)>>20);
137 break;
138 }
140 if (mem_kb % (16 << 10)) {
141 DPRINTF("Domain memory %dMB must be a multiple of 16MB\n",
142 mem_mb);
144 break;
145 }
147 /* rma_log and mem_mb OK */
148 return 0;
150 default:
151 DPRINTF("Invalid rma_log (%d)\n", rma_log);
152 }
154 return 1;
155 }
157 static int alloc_memory(int xc_handle, domid_t domid, ulong nr_pages,
158 ulong rma_pages)
159 {
160 xen_pfn_t *extent_pfn_arry;
161 ulong nr_extents;
162 ulong start_pfn = rma_pages;
163 int i;
164 int j;
165 int rc = 0;
167 nr_extents = (nr_pages - rma_pages) >> EXTENT_ORDER;
168 DPRINTF("allocating memory in %lu chunks of %luMB\n", nr_extents,
169 1UL >> (20 - EXTENT_ORDER));
171 /* populate_physmap requires an array of PFNs that determine where the
172 * guest mapping of the new MFNs. */
173 extent_pfn_arry = malloc((1<<EXTENT_ORDER) * sizeof(xen_pfn_t));
174 if (extent_pfn_arry == NULL) {
175 PERROR("Couldn't allocate extent PFN array.\n");
176 return -ENOMEM;
177 }
179 /* Now allocate the remaining memory as large-order extents. */
180 for (i = 0; i < nr_extents; i++) {
181 /* Initialize the extent PFN array. */
182 for (j = 0; j < (1 << EXTENT_ORDER); j++)
183 extent_pfn_arry[j] = start_pfn++;
185 DPRINTF("populate_physmap(Dom%u, order %u, starting_pfn %llx)\n",
186 domid, EXTENT_ORDER, extent_pfn_arry[0]);
188 if (xc_domain_memory_populate_physmap(xc_handle, domid, 1, EXTENT_ORDER,
189 0, extent_pfn_arry))
190 {
191 PERROR("Could not allocate extents\n");
192 rc = -1;
193 break;
194 }
195 }
197 free(extent_pfn_arry);
198 return rc;
199 }
/*
 * Build a PowerPC Linux domain: allocate its memory (RMA + 16MB extents),
 * load the kernel and optional initrd, construct and install the flattened
 * device tree, and initialize VCPU 0.
 *
 * @xc_handle:      open handle to the hypervisor control interface.
 * @domid:          target domain.
 * @mem_mb:         total domain memory in MB (validated against the RMA
 *                  size by check_memory_config()).
 * @image_name:     path to the kernel ELF image.
 * @initrd_name:    path to the initrd, or NULL/"" for none.
 * @cmdline:        kernel command line (passed into the device tree).
 * @features:       unused in this function.
 * @flags:          unused in this function.
 * @store_evtchn:   xenstore event channel (recorded in the device tree).
 * @store_mfn:      out: MFN of the xenstore page.
 * @console_evtchn: console event channel (recorded in the device tree).
 * @console_mfn:    out: MFN of the console page.
 *
 * Returns 0 on success, -1 on failure.  Cleanup runs through the "out2"
 * (device tree built) and "out" labels in reverse order of acquisition.
 */
int xc_linux_build(int xc_handle,
                   uint32_t domid,
                   unsigned int mem_mb,
                   const char *image_name,
                   const char *initrd_name,
                   const char *cmdline,
                   const char *features,
                   unsigned long flags,
                   unsigned int store_evtchn,
                   unsigned long *store_mfn,
                   unsigned int console_evtchn,
                   unsigned long *console_mfn)
{
    struct domain_setup_info dsi;
    xen_pfn_t *page_array = NULL;   /* NULL so free_page_array() is safe on
                                     * early error paths */
    unsigned long nr_pages;
    unsigned long devtree_addr = 0;
    unsigned long kern_addr;
    unsigned long initrd_base = 0;
    unsigned long initrd_len = 0;
    unsigned long rma_pages;
    unsigned long shadow_mb;
    u64 shared_info_paddr;
    u64 store_paddr;
    u64 console_paddr;
    int rma_log = 26; /* 64MB RMA */
    int rc = 0;
    int op;
    struct ft_cxt devtree;

    DPRINTF("%s\n", __func__);

    nr_pages = mem_mb << (20 - PAGE_SHIFT);
    DPRINTF("nr_pages 0x%lx\n", nr_pages);

    rma_pages = (1 << rma_log) >> PAGE_SHIFT;
    if (rma_pages == 0) {
        rc = -1;
        goto out;
    }

    /* validate rma_log and domain memory config */
    if (check_memory_config(rma_log, mem_mb)) {
        rc = -1;
        goto out;
    }

    /* Allocate the RMA. */
    DPRINTF("RMA: 0x%lx pages\n", rma_pages);
    if (xc_alloc_real_mode_area(xc_handle, domid, rma_log)) {
        rc = -1;
        goto out;
    }

    /* Get the MFN mapping (for RMA only -- we only load data into the RMA). */
    if (get_rma_page_array(xc_handle, domid, &page_array, rma_pages)) {
        rc = -1;
        goto out;
    }

    /* Allocate the non-RMA memory as 16MB extents via populate_physmap. */
    rc = alloc_memory(xc_handle, domid, nr_pages, rma_pages);
    if (rc) {
        goto out;
    }

    /* Load kernel. */
    DPRINTF("loading image '%s'\n", image_name);
    if (load_elf_kernel(xc_handle, domid, image_name, &dsi, page_array)) {
        rc = -1;
        goto out;
    }
    /* Passed to the kernel in r4 by init_boot_vcpu(). */
    kern_addr = 0;

    /* Load initrd (optional; initrd_base/initrd_len stay 0 if absent). */
    if (initrd_name && initrd_name[0] != '\0') {
        DPRINTF("loading initrd '%s'\n", initrd_name);
        if (load_initrd(xc_handle, domid, page_array, initrd_name,
                        &initrd_base, &initrd_len)) {
            rc = -1;
            goto out;
        }
    }

    /* fetch the current shadow_memory value for this domain */
    op = XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION;
    if (xc_shadow_control(xc_handle, domid, op, NULL, 0,
                          &shadow_mb, 0, NULL) < 0) {
        rc = -1;
        goto out;
    }

    /* determine shared_info, console, and store paddr: the top three pages
     * of the RMA, from the top down */
    shared_info_paddr = (rma_pages << PAGE_SHIFT) - PAGE_SIZE;
    console_paddr = shared_info_paddr - PAGE_SIZE;
    store_paddr = console_paddr - PAGE_SIZE;

    /* map paddrs to mfns */
    *store_mfn = page_array[(xen_pfn_t)(store_paddr >> PAGE_SHIFT)];
    *console_mfn = page_array[(xen_pfn_t)(console_paddr >> PAGE_SHIFT)];
    DPRINTF("console_mfn->%08lx store_mfn->%08lx\n", *console_mfn,
            *store_mfn);

    /* build the devtree here */
    DPRINTF("constructing devtree\n");
    if (make_devtree(&devtree, domid, mem_mb, (rma_pages << PAGE_SHIFT),
                     shadow_mb, initrd_base, initrd_len, cmdline,
                     shared_info_paddr, console_evtchn, console_paddr,
                     store_evtchn, store_paddr) < 0) {
        DPRINTF("failed to create flattened device tree\n");
        rc = -1;
        goto out;
    }

    devtree_addr = DEVTREE_ADDR;
    DPRINTF("loading flattened device tree to 0x%lx[0x%x]\n",
            devtree_addr, devtree.bph->totalsize);

    if (install_image(xc_handle, domid, page_array, (void *)devtree.bph,
                      devtree_addr, devtree.bph->totalsize)) {
        DPRINTF("couldn't load flattened device tree.\n");
        rc = -1;
        goto out2;
    }

    if (init_boot_vcpu(xc_handle, domid, &dsi, devtree_addr, kern_addr)) {
        rc = -1;
        goto out2;
    }

    /* Success falls through the cleanup labels with rc == 0. */
out2:
    free_devtree(&devtree);
out:
    free_page_array(page_array);
    return rc;
}