netbsd-2.0-xen-sparse/sys/arch/xen/i386/machdep.c @ 4978:d5741cfb6618

bitkeeper revision 1.1447 (428b4444vZlkscBp_EOZwbunWL1zPA)

Merge arcadians.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into arcadians.cl.cam.ac.uk:/local/scratch-2/vh249/xen-unstable.bk

author   vh249@arcadians.cl.cam.ac.uk
date     Wed May 18 13:33:56 2005 +0000 (2005-05-18)
parents  66543cb296a9 97d31548a2b1
1 /* $NetBSD: machdep.c,v 1.2.2.1 2004/05/22 15:58:02 he Exp $ */
2 /* NetBSD: machdep.c,v 1.552 2004/03/24 15:34:49 atatat Exp */
4 /*-
5 * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility, NASA Ames Research Center.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the NetBSD
23 * Foundation, Inc. and its contributors.
24 * 4. Neither the name of The NetBSD Foundation nor the names of its
25 * contributors may be used to endorse or promote products derived
26 * from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
39 */
41 /*-
42 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
43 * All rights reserved.
44 *
45 * This code is derived from software contributed to Berkeley by
46 * William Jolitz.
47 *
48 * Redistribution and use in source and binary forms, with or without
49 * modification, are permitted provided that the following conditions
50 * are met:
51 * 1. Redistributions of source code must retain the above copyright
52 * notice, this list of conditions and the following disclaimer.
53 * 2. Redistributions in binary form must reproduce the above copyright
54 * notice, this list of conditions and the following disclaimer in the
55 * documentation and/or other materials provided with the distribution.
56 * 3. Neither the name of the University nor the names of its contributors
57 * may be used to endorse or promote products derived from this software
58 * without specific prior written permission.
59 *
60 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
61 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
62 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
63 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
64 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
65 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
66 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
67 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
68 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
69 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
70 * SUCH DAMAGE.
71 *
72 * @(#)machdep.c 7.4 (Berkeley) 6/3/91
73 */
75 #include <sys/cdefs.h>
76 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.2.2.1 2004/05/22 15:58:02 he Exp $");
78 #include "opt_beep.h"
79 #include "opt_compat_ibcs2.h"
80 #include "opt_compat_mach.h" /* need to get the right segment def */
81 #include "opt_compat_netbsd.h"
82 #include "opt_compat_svr4.h"
83 #include "opt_cpureset_delay.h"
84 #include "opt_cputype.h"
85 #include "opt_ddb.h"
86 #include "opt_ipkdb.h"
87 #include "opt_kgdb.h"
88 #include "opt_mtrr.h"
89 #include "opt_multiprocessor.h"
90 #include "opt_realmem.h"
91 #include "opt_user_ldt.h"
92 #include "opt_vm86.h"
93 #include "opt_xen.h"
95 #include <sys/param.h>
96 #include <sys/systm.h>
97 #include <sys/signal.h>
98 #include <sys/signalvar.h>
99 #include <sys/kernel.h>
100 #include <sys/proc.h>
101 #include <sys/user.h>
102 #include <sys/exec.h>
103 #include <sys/buf.h>
104 #include <sys/reboot.h>
105 #include <sys/conf.h>
106 #include <sys/file.h>
107 #include <sys/malloc.h>
108 #include <sys/mbuf.h>
109 #include <sys/msgbuf.h>
110 #include <sys/mount.h>
111 #include <sys/vnode.h>
112 #include <sys/extent.h>
113 #include <sys/syscallargs.h>
114 #include <sys/core.h>
115 #include <sys/kcore.h>
116 #include <sys/ucontext.h>
117 #include <machine/kcore.h>
118 #include <sys/ras.h>
119 #include <sys/sa.h>
120 #include <sys/savar.h>
121 #include <sys/ksyms.h>
123 #ifdef IPKDB
124 #include <ipkdb/ipkdb.h>
125 #endif
127 #ifdef KGDB
128 #include <sys/kgdb.h>
129 #endif
131 #include <dev/cons.h>
133 #include <uvm/uvm_extern.h>
134 #include <uvm/uvm_page.h>
136 #include <sys/sysctl.h>
138 #include <machine/cpu.h>
139 #include <machine/cpufunc.h>
140 #include <machine/cpuvar.h>
141 #include <machine/gdt.h>
142 #include <machine/pio.h>
143 #include <machine/psl.h>
144 #include <machine/reg.h>
145 #include <machine/specialreg.h>
146 #include <machine/bootinfo.h>
147 #include <machine/mtrr.h>
148 #include <machine/evtchn.h>
150 #include <dev/isa/isareg.h>
151 #include <machine/isa_machdep.h>
152 #include <dev/ic/i8042reg.h>
154 #ifdef DDB
155 #include <machine/db_machdep.h>
156 #include <ddb/db_extern.h>
157 #endif
159 #ifdef VM86
160 #include <machine/vm86.h>
161 #endif
163 #include "acpi.h"
164 #include "apm.h"
165 #include "bioscall.h"
167 #if NBIOSCALL > 0
168 #include <machine/bioscall.h>
169 #endif
171 #if NACPI > 0
172 #include <dev/acpi/acpivar.h>
173 #define ACPI_MACHDEP_PRIVATE
174 #include <machine/acpi_machdep.h>
175 #endif
177 #if NAPM > 0
178 #include <machine/apmvar.h>
179 #endif
181 #include "isa.h"
182 #include "isadma.h"
183 #include "npx.h"
184 #include "ksyms.h"
186 #include "mca.h"
187 #if NMCA > 0
188 #include <machine/mca_machdep.h> /* for mca_busprobe() */
189 #endif
191 #ifdef MULTIPROCESSOR /* XXX */
192 #include <machine/mpbiosvar.h> /* XXX */
193 #endif /* XXX */
195 #include <machine/xen.h>
196 #include <machine/hypervisor.h>
198 #if defined(DDB) || defined(KGDB)
199 #include <ddb/db_interface.h>
200 #include <ddb/db_output.h>
202 void ddb_trap_hook(int);
203 #endif
205 /* #define XENDEBUG */
206 /* #define XENDEBUG_LOW */
208 #ifdef XENDEBUG
209 extern void printk(char *, ...);
210 #define XENPRINTF(x) printf x
211 #define XENPRINTK(x) printk x
212 #else
213 #define XENPRINTF(x)
214 #define XENPRINTK(x)
215 #endif
216 #define PRINTK(x) printf x
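/*
 * Editor's note (illustrative, not in the original): the extra
 * parentheses let these debug macros take a printf-style argument list
 * on pre-C99 compilers.  A call site passes the whole list as one
 * parenthesized group, e.g.:
 *
 *	XENPRINTF(("map va %p -> pa %p\n", (void *)va, (void *)pa));
 *
 * and when XENDEBUG is not defined the call expands to nothing at all.
 */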
218 #ifdef XENDEBUG_LOW
219 void xen_dbglow_init(void);
220 #endif
222 #ifndef BEEP_ONHALT_COUNT
223 #define BEEP_ONHALT_COUNT 3
224 #endif
225 #ifndef BEEP_ONHALT_PITCH
226 #define BEEP_ONHALT_PITCH 1500
227 #endif
228 #ifndef BEEP_ONHALT_PERIOD
229 #define BEEP_ONHALT_PERIOD 250
230 #endif
232 /* the following is used externally (sysctl_hw) */
233 char machine[] = "i386"; /* CPU "architecture" */
234 char machine_arch[] = "i386"; /* machine == machine_arch */
236 char bootinfo[BOOTINFO_MAXSIZE];
238 struct bi_devmatch *i386_alldisks = NULL;
239 int i386_ndisks = 0;
241 #ifdef CPURESET_DELAY
242 int cpureset_delay = CPURESET_DELAY;
243 #else
244 int cpureset_delay = 2000; /* default to 2s */
245 #endif
247 #ifdef MTRR
248 struct mtrr_funcs *mtrr_funcs;
249 #endif
251 #ifdef COMPAT_NOMID
252 static int exec_nomid(struct proc *, struct exec_package *);
253 #endif
255 int physmem;
256 int dumpmem_low;
257 int dumpmem_high;
258 unsigned int cpu_feature;
259 int cpu_class;
260 int i386_fpu_present;
261 int i386_fpu_exception;
262 int i386_fpu_fdivbug;
264 int i386_use_fxsave;
265 int i386_has_sse;
266 int i386_has_sse2;
268 int tmx86_has_longrun;
270 vaddr_t msgbuf_vaddr;
271 paddr_t msgbuf_paddr;
273 vaddr_t idt_vaddr;
274 paddr_t idt_paddr;
276 #ifdef I586_CPU
277 vaddr_t pentium_idt_vaddr;
278 #endif
280 struct vm_map *exec_map = NULL;
281 struct vm_map *mb_map = NULL;
282 struct vm_map *phys_map = NULL;
284 extern paddr_t avail_start, avail_end;
285 extern paddr_t pmap_pa_start, pmap_pa_end;
287 #ifdef ISA_CLOCK
288 void (*delay_func)(int) = i8254_delay;
289 void (*microtime_func)(struct timeval *) = i8254_microtime;
290 void (*initclock_func)(void) = i8254_initclocks;
291 #else
292 void (*delay_func)(int) = xen_delay;
293 void (*microtime_func)(struct timeval *) = xen_microtime;
294 void (*initclock_func)(void) = xen_initclocks;
295 #endif
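/*
 * Editor's note: routing delay/microtime/initclocks through function
 * pointers keeps callers backend-agnostic; the same kernel source
 * serves the native i8254 timer (ISA_CLOCK) or the Xen paravirtual
 * time interface, and callers simply invoke (*delay_func)(usec).
 */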
297 void hypervisor_callback(void);
298 void failsafe_callback(void);
300 /*
301 * Size of memory segments, before any memory is stolen.
302 */
303 phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
304 int mem_cluster_cnt;
306 int cpu_dump(void);
307 int cpu_dumpsize(void);
308 u_long cpu_dump_mempagecnt(void);
309 void dumpsys(void);
310 void init386(paddr_t);
311 void initgdt(void);
313 #if !defined(REALBASEMEM) && !defined(REALEXTMEM)
314 void add_mem_cluster(u_int64_t, u_int64_t, u_int32_t);
315 #endif /* !defined(REALBASEMEM) && !defined(REALEXTMEM) */
317 extern int time_adjusted;
319 /*
320 * Machine-dependent startup code
321 */
322 void
323 cpu_startup()
324 {
325 int x;
326 vaddr_t minaddr, maxaddr;
327 char pbuf[9];
329 /*
330 * Initialize error message buffer (at end of core).
331 */
332 msgbuf_vaddr = uvm_km_valloc(kernel_map, x86_round_page(MSGBUFSIZE));
333 if (msgbuf_vaddr == 0)
334 panic("failed to valloc msgbuf_vaddr");
336 /* msgbuf_paddr was init'd in pmap */
337 for (x = 0; x < btoc(MSGBUFSIZE); x++)
338 pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * PAGE_SIZE,
339 msgbuf_paddr + x * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
340 pmap_update(pmap_kernel());
342 initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));
344 printf("%s", version);
346 #ifdef TRAPLOG
347 /*
348 * Enable recording of branch from/to in MSR's
349 */
350 wrmsr(MSR_DEBUGCTLMSR, 0x1);
351 #endif
353 format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
354 printf("total memory = %s\n", pbuf);
356 minaddr = 0;
358 /*
359 * Allocate a submap for exec arguments. This map effectively
360 * limits the number of processes exec'ing at any time.
361 */
362 exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
363 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
365 /*
366 * Allocate a submap for physio
367 */
368 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
369 VM_PHYS_SIZE, 0, FALSE, NULL);
371 /*
372 * Finally, allocate mbuf cluster submap.
373 */
374 mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
375 nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);
377 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
378 printf("avail memory = %s\n", pbuf);
380 /* Safe for i/o port / memory space allocation to use malloc now. */
381 x86_bus_space_mallocok();
382 }
384 /*
385 * Set up proc0's TSS and LDT.
386 */
387 void
388 i386_proc0_tss_ldt_init()
389 {
389 {
390 struct pcb *pcb;
391 int x;
393 gdt_init();
395 cpu_info_primary.ci_curpcb = pcb = &lwp0.l_addr->u_pcb;
397 pcb->pcb_tss.tss_ioopt =
398 ((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss) << 16
399 | SEL_KPL; /* i/o pl */
401 for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
402 pcb->pcb_iomap[x] = 0xffffffff;
404 pcb->pcb_ldt_sel = pmap_kernel()->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
405 pcb->pcb_cr0 = rcr0();
406 pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
407 pcb->pcb_tss.tss_esp0 = (int)lwp0.l_addr + USPACE - 16;
408 lwp0.l_md.md_regs = (struct trapframe *)pcb->pcb_tss.tss_esp0 - 1;
409 lwp0.l_md.md_tss_sel = tss_alloc(pcb);
411 #ifndef XEN
412 ltr(lwp0.l_md.md_tss_sel);
413 lldt(pcb->pcb_ldt_sel);
414 #else
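/*
 * Editor's note: a Xen guest runs deprivileged and may not execute
 * ltr/lldt itself, so instead of loading a hardware task register it
 * tells the hypervisor which stack to switch to on the next
 * user-to-kernel transition (HYPERVISOR_stack_switch) and lets Xen
 * track FPU ownership via HYPERVISOR_fpu_taskswitch.
 */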
415 HYPERVISOR_fpu_taskswitch(1);
416 XENPRINTF(("lwp tss sp %p ss %04x/%04x\n",
417 (void *)pcb->pcb_tss.tss_esp0,
418 pcb->pcb_tss.tss_ss0, IDXSEL(pcb->pcb_tss.tss_ss0)));
419 HYPERVISOR_stack_switch(pcb->pcb_tss.tss_ss0, pcb->pcb_tss.tss_esp0);
420 #endif
421 }
423 /*
424 * Set up TSS and LDT for a new PCB.
425 */
427 void
428 i386_init_pcb_tss_ldt(struct cpu_info *ci)
429 {
430 int x;
431 struct pcb *pcb = ci->ci_idle_pcb;
433 pcb->pcb_tss.tss_ioopt =
434 ((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss) << 16
435 | SEL_KPL; /* i/o pl */
436 for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
437 pcb->pcb_iomap[x] = 0xffffffff;
439 pcb->pcb_ldt_sel = pmap_kernel()->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
440 pcb->pcb_cr0 = rcr0();
442 ci->ci_idle_tss_sel = tss_alloc(pcb);
443 }
445 /*
446 * Switch context:
447 * - honor CR0_TS in saved CR0 and request DNA exception on FPU use
448 * - switch stack pointer for user->kernel transition
449 */
450 void
451 i386_switch_context(struct pcb *new)
452 {
452 {
453 dom0_op_t op;
454 struct cpu_info *ci;
456 ci = curcpu();
457 if (ci->ci_fpused) {
458 HYPERVISOR_fpu_taskswitch(1);
459 ci->ci_fpused = 0;
460 }
462 HYPERVISOR_stack_switch(new->pcb_tss.tss_ss0, new->pcb_tss.tss_esp0);
464 if (xen_start_info.flags & SIF_PRIVILEGED) {
465 op.cmd = DOM0_IOPL;
466 op.u.iopl.domain = DOMID_SELF;
467 op.u.iopl.iopl = new->pcb_tss.tss_ioopt & SEL_RPL; /* i/o pl */
468 HYPERVISOR_dom0_op(&op);
469 }
470 }
472 /*
473 * sysctl helper routine for machdep.tm* nodes.
474 */
475 static int
476 sysctl_machdep_tm_longrun(SYSCTLFN_ARGS)
477 {
477 {
478 struct sysctlnode node;
479 int io, error;
481 if (!tmx86_has_longrun)
482 return (EOPNOTSUPP);
484 node = *rnode;
485 node.sysctl_data = &io;
487 switch (rnode->sysctl_num) {
488 case CPU_TMLR_MODE:
489 io = (int)(crusoe_longrun = tmx86_get_longrun_mode());
490 break;
491 case CPU_TMLR_FREQUENCY:
492 tmx86_get_longrun_status_all();
493 io = crusoe_frequency;
494 break;
495 case CPU_TMLR_VOLTAGE:
496 tmx86_get_longrun_status_all();
497 io = crusoe_voltage;
498 break;
499 case CPU_TMLR_PERCENTAGE:
500 tmx86_get_longrun_status_all();
501 io = crusoe_percentage;
502 break;
503 default:
504 return (EOPNOTSUPP);
505 }
507 error = sysctl_lookup(SYSCTLFN_CALL(&node));
508 if (error || newp == NULL)
509 return (error);
511 if (rnode->sysctl_num == CPU_TMLR_MODE) {
512 if (tmx86_set_longrun_mode(io))
513 crusoe_longrun = (u_int)io;
514 else
515 return (EINVAL);
516 }
518 return (0);
519 }
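/*
 * Editor's note (hypothetical usage): once the nodes below are created,
 * the helper above is exercised from userland with sysctl(8), e.g.:
 *
 *	$ sysctl machdep.tm_longrun_mode
 *	$ sysctl -w machdep.tm_longrun_mode=1
 *
 * Only the mode node is marked read-write; frequency, voltage and
 * percentage are read-only snapshots.
 */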
521 /*
522 * sysctl helper routine for machdep.booted_kernel
523 */
524 static int
525 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
526 {
526 {
527 struct btinfo_bootpath *bibp;
528 struct sysctlnode node;
530 bibp = lookup_bootinfo(BTINFO_BOOTPATH);
531 if(!bibp)
532 return(ENOENT); /* ??? */
534 node = *rnode;
535 node.sysctl_data = bibp->bootpath;
536 node.sysctl_size = sizeof(bibp->bootpath);
537 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
538 }
540 /*
541 * sysctl helper routine for machdep.diskinfo
542 */
543 static int
544 sysctl_machdep_diskinfo(SYSCTLFN_ARGS)
545 {
545 {
546 struct sysctlnode node;
548 node = *rnode;
549 node.sysctl_data = i386_alldisks;
550 node.sysctl_size = sizeof(struct disklist) +
551 (i386_ndisks - 1) * sizeof(struct nativedisk_info);
552 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
553 }
555 /*
556 * machine dependent system variables.
557 */
558 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
559 {
561 sysctl_createv(clog, 0, NULL, NULL,
562 CTLFLAG_PERMANENT,
563 CTLTYPE_NODE, "machdep", NULL,
564 NULL, 0, NULL, 0,
565 CTL_MACHDEP, CTL_EOL);
567 sysctl_createv(clog, 0, NULL, NULL,
568 CTLFLAG_PERMANENT,
569 CTLTYPE_STRUCT, "console_device", NULL,
570 sysctl_consdev, 0, NULL, sizeof(dev_t),
571 CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
572 sysctl_createv(clog, 0, NULL, NULL,
573 CTLFLAG_PERMANENT,
574 CTLTYPE_INT, "biosbasemem", NULL,
575 NULL, 0, &biosbasemem, 0,
576 CTL_MACHDEP, CPU_BIOSBASEMEM, CTL_EOL);
577 sysctl_createv(clog, 0, NULL, NULL,
578 CTLFLAG_PERMANENT,
579 CTLTYPE_INT, "biosextmem", NULL,
580 NULL, 0, &biosextmem, 0,
581 CTL_MACHDEP, CPU_BIOSEXTMEM, CTL_EOL);
582 sysctl_createv(clog, 0, NULL, NULL,
583 CTLFLAG_PERMANENT,
584 CTLTYPE_INT, "nkpde", NULL,
585 NULL, 0, &nkpde, 0,
586 CTL_MACHDEP, CPU_NKPDE, CTL_EOL);
587 sysctl_createv(clog, 0, NULL, NULL,
588 CTLFLAG_PERMANENT,
589 CTLTYPE_STRING, "booted_kernel", NULL,
590 sysctl_machdep_booted_kernel, 0, NULL, 0,
591 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
592 sysctl_createv(clog, 0, NULL, NULL,
593 CTLFLAG_PERMANENT,
594 CTLTYPE_STRUCT, "diskinfo", NULL,
595 sysctl_machdep_diskinfo, 0, NULL, 0,
596 CTL_MACHDEP, CPU_DISKINFO, CTL_EOL);
597 sysctl_createv(clog, 0, NULL, NULL,
598 CTLFLAG_PERMANENT,
599 CTLTYPE_INT, "fpu_present", NULL,
600 NULL, 0, &i386_fpu_present, 0,
601 CTL_MACHDEP, CPU_FPU_PRESENT, CTL_EOL);
602 sysctl_createv(clog, 0, NULL, NULL,
603 CTLFLAG_PERMANENT,
604 CTLTYPE_INT, "osfxsr", NULL,
605 NULL, 0, &i386_use_fxsave, 0,
606 CTL_MACHDEP, CPU_OSFXSR, CTL_EOL);
607 sysctl_createv(clog, 0, NULL, NULL,
608 CTLFLAG_PERMANENT,
609 CTLTYPE_INT, "sse", NULL,
610 NULL, 0, &i386_has_sse, 0,
611 CTL_MACHDEP, CPU_SSE, CTL_EOL);
612 sysctl_createv(clog, 0, NULL, NULL,
613 CTLFLAG_PERMANENT,
614 CTLTYPE_INT, "sse2", NULL,
615 NULL, 0, &i386_has_sse2, 0,
616 CTL_MACHDEP, CPU_SSE2, CTL_EOL);
617 sysctl_createv(clog, 0, NULL, NULL,
618 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
619 CTLTYPE_INT, "tm_longrun_mode", NULL,
620 sysctl_machdep_tm_longrun, 0, NULL, 0,
621 CTL_MACHDEP, CPU_TMLR_MODE, CTL_EOL);
622 sysctl_createv(clog, 0, NULL, NULL,
623 CTLFLAG_PERMANENT,
624 CTLTYPE_INT, "tm_longrun_frequency", NULL,
625 sysctl_machdep_tm_longrun, 0, NULL, 0,
626 CTL_MACHDEP, CPU_TMLR_FREQUENCY, CTL_EOL);
627 sysctl_createv(clog, 0, NULL, NULL,
628 CTLFLAG_PERMANENT,
629 CTLTYPE_INT, "tm_longrun_voltage", NULL,
630 sysctl_machdep_tm_longrun, 0, NULL, 0,
631 CTL_MACHDEP, CPU_TMLR_VOLTAGE, CTL_EOL);
632 sysctl_createv(clog, 0, NULL, NULL,
633 CTLFLAG_PERMANENT,
634 CTLTYPE_INT, "tm_longrun_percentage", NULL,
635 sysctl_machdep_tm_longrun, 0, NULL, 0,
636 CTL_MACHDEP, CPU_TMLR_PERCENTAGE, CTL_EOL);
637 }
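/*
 * Editor's note (sketch, not part of the original source): a userland
 * program could read one of the string nodes registered above with
 * sysctlbyname(3):
 *
 *	char buf[512];
 *	size_t len = sizeof(buf);
 *	if (sysctlbyname("machdep.booted_kernel", buf, &len, NULL, 0) == 0)
 *		printf("booted from: %s\n", buf);
 */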
639 void *
640 getframe(struct lwp *l, int sig, int *onstack)
641 {
642 struct proc *p = l->l_proc;
643 struct sigctx *ctx = &p->p_sigctx;
644 struct trapframe *tf = l->l_md.md_regs;
646 /* Do we need to jump onto the signal stack? */
647 *onstack = (ctx->ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
648 && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
649 if (*onstack)
650 return (char *)ctx->ps_sigstk.ss_sp + ctx->ps_sigstk.ss_size;
651 #ifdef VM86
652 if (tf->tf_eflags & PSL_VM)
653 return (void *)(tf->tf_esp + (tf->tf_ss << 4));
654 else
655 #endif
656 return (void *)tf->tf_esp;
657 }
659 /*
660 * Build context to run handler in. We invoke the handler
661 * directly, only returning via the trampoline. Note the
662 * trampoline version numbers are coordinated with machine-
663 * dependent code in libc.
664 */
665 void
666 buildcontext(struct lwp *l, int sel, void *catcher, void *fp)
667 {
668 struct trapframe *tf = l->l_md.md_regs;
670 tf->tf_gs = GSEL(GUDATA_SEL, SEL_UPL);
671 tf->tf_fs = GSEL(GUDATA_SEL, SEL_UPL);
672 tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
673 tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
674 tf->tf_eip = (int)catcher;
675 tf->tf_cs = GSEL(sel, SEL_UPL);
676 tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
677 tf->tf_esp = (int)fp;
678 tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
679 }
681 static void
682 sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
683 {
683 {
684 struct lwp *l = curlwp;
685 struct proc *p = l->l_proc;
686 struct pmap *pmap = vm_map_pmap(&p->p_vmspace->vm_map);
687 int sel = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
688 GUCODEBIG_SEL : GUCODE_SEL;
689 struct sigacts *ps = p->p_sigacts;
690 int onstack;
691 int sig = ksi->ksi_signo;
692 struct sigframe_siginfo *fp = getframe(l, sig, &onstack), frame;
693 sig_t catcher = SIGACTION(p, sig).sa_handler;
694 struct trapframe *tf = l->l_md.md_regs;
696 fp--;
698 /* Build stack frame for signal trampoline. */
699 switch (ps->sa_sigdesc[sig].sd_vers) {
700 case 0: /* handled by sendsig_sigcontext */
701 case 1: /* handled by sendsig_sigcontext */
702 default: /* unknown version */
703 printf("nsendsig: bad version %d\n",
704 ps->sa_sigdesc[sig].sd_vers);
705 sigexit(l, SIGILL);
706 case 2:
707 break;
708 }
710 frame.sf_ra = (int)ps->sa_sigdesc[sig].sd_tramp;
711 frame.sf_signum = sig;
712 frame.sf_sip = &fp->sf_si;
713 frame.sf_ucp = &fp->sf_uc;
714 frame.sf_si._info = ksi->ksi_info;
715 frame.sf_uc.uc_flags = _UC_SIGMASK|_UC_VM;
716 frame.sf_uc.uc_sigmask = *mask;
717 frame.sf_uc.uc_link = NULL;
718 frame.sf_uc.uc_flags |= (p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK)
719 ? _UC_SETSTACK : _UC_CLRSTACK;
720 memset(&frame.sf_uc.uc_stack, 0, sizeof(frame.sf_uc.uc_stack));
721 cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
723 if (tf->tf_eflags & PSL_VM)
724 (*p->p_emul->e_syscall_intern)(p);
726 if (copyout(&frame, fp, sizeof(frame)) != 0) {
727 /*
728 * Process has trashed its stack; give it an illegal
729 * instruction to halt it in its tracks.
730 */
731 sigexit(l, SIGILL);
732 /* NOTREACHED */
733 }
735 buildcontext(l, sel, catcher, fp);
737 /* Remember that we're now on the signal stack. */
738 if (onstack)
739 p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
740 }
742 void
743 sendsig(const ksiginfo_t *ksi, const sigset_t *mask)
744 {
745 #ifdef COMPAT_16
746 if (curproc->p_sigacts->sa_sigdesc[ksi->ksi_signo].sd_vers < 2)
747 sendsig_sigcontext(ksi, mask);
748 else
749 #endif
750 sendsig_siginfo(ksi, mask);
751 }
753 void
754 cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted, void *sas,
755 void *ap, void *sp, sa_upcall_t upcall)
756 {
757 struct pmap *pmap = vm_map_pmap(&l->l_proc->p_vmspace->vm_map);
758 struct saframe *sf, frame;
759 struct trapframe *tf;
761 tf = l->l_md.md_regs;
763 /* Finally, copy out the rest of the frame. */
764 frame.sa_type = type;
765 frame.sa_sas = sas;
766 frame.sa_events = nevents;
767 frame.sa_interrupted = ninterrupted;
768 frame.sa_arg = ap;
769 frame.sa_ra = 0;
771 sf = (struct saframe *)sp - 1;
772 if (copyout(&frame, sf, sizeof(frame)) != 0) {
773 /* Copying onto the stack didn't work. Die. */
774 sigexit(l, SIGILL);
775 /* NOTREACHED */
776 }
778 tf->tf_eip = (int) upcall;
779 tf->tf_esp = (int) sf;
780 tf->tf_ebp = 0; /* indicate call-frame-top to debuggers */
781 tf->tf_gs = GSEL(GUDATA_SEL, SEL_UPL);
782 tf->tf_fs = GSEL(GUDATA_SEL, SEL_UPL);
783 tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
784 tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
785 tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
786 GSEL(GUCODEBIG_SEL, SEL_UPL) : GSEL(GUCODE_SEL, SEL_UPL);
787 tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
788 tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
789 }
791 int waittime = -1;
792 struct pcb dumppcb;
794 void
795 cpu_reboot(int howto, char *bootstr)
796 {
796 {
798 if (cold) {
799 howto |= RB_HALT;
800 goto haltsys;
801 }
803 boothowto = howto;
804 if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
805 waittime = 0;
806 vfs_shutdown();
807 /*
808 * If we've been adjusting the clock, the todr
809 * will be out of synch; adjust it now.
810 */
811 if (time_adjusted != 0)
812 resettodr();
813 }
815 /* Disable interrupts. */
816 splhigh();
818 /* Do a dump if requested. */
819 if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
820 dumpsys();
822 haltsys:
823 doshutdownhooks();
825 #ifdef MULTIPROCESSOR
826 x86_broadcast_ipi(X86_IPI_HALT);
827 #endif
829 if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
830 #if NACPI > 0
831 if (acpi_softc != NULL) {
832 delay(500000);
833 acpi_enter_sleep_state(acpi_softc, ACPI_STATE_S5);
834 printf("WARNING: ACPI powerdown failed!\n");
835 }
836 #endif
837 #if NAPM > 0 && !defined(APM_NO_POWEROFF)
838 /* turn off, if we can. But try to turn disk off and
839 * wait a bit first--some disk drives are slow to clean up
840 * and users have reported disk corruption.
841 */
842 delay(500000);
843 apm_set_powstate(APM_DEV_DISK(0xff), APM_SYS_OFF);
844 delay(500000);
845 apm_set_powstate(APM_DEV_ALLDEVS, APM_SYS_OFF);
846 printf("WARNING: APM powerdown failed!\n");
847 /*
848 * RB_POWERDOWN implies RB_HALT... fall into it...
849 */
850 #endif
851 HYPERVISOR_shutdown();
852 }
854 if (howto & RB_HALT) {
855 printf("\n");
856 printf("The operating system has halted.\n");
858 /* XXX cngetc() below doesn't work, shutdown machine for now */
859 HYPERVISOR_shutdown();
861 printf("Please press any key to reboot.\n\n");
863 #ifdef BEEP_ONHALT
864 {
865 int c;
866 for (c = BEEP_ONHALT_COUNT; c > 0; c--) {
867 sysbeep(BEEP_ONHALT_PITCH,
868 BEEP_ONHALT_PERIOD * hz / 1000);
869 delay(BEEP_ONHALT_PERIOD * 1000);
870 sysbeep(0, BEEP_ONHALT_PERIOD * hz / 1000);
871 delay(BEEP_ONHALT_PERIOD * 1000);
872 }
873 }
874 #endif
876 cnpollc(1); /* for proper keyboard command handling */
877 if (cngetc() == 0) {
878 /* no console attached, so just hlt */
879 for(;;) {
880 __asm __volatile("hlt");
881 }
882 }
883 cnpollc(0);
884 }
886 printf("rebooting...\n");
887 if (cpureset_delay > 0)
888 delay(cpureset_delay * 1000);
889 cpu_reset();
890 for(;;) ;
891 /*NOTREACHED*/
892 }
894 /*
895 * These variables are needed by /sbin/savecore
896 */
897 u_int32_t dumpmag = 0x8fca0101; /* magic number */
898 int dumpsize = 0; /* pages */
899 long dumplo = 0; /* blocks */
901 /*
902 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
903 */
904 int
905 cpu_dumpsize()
906 {
906 {
907 int size;
909 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
910 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
911 if (roundup(size, dbtob(1)) != dbtob(1))
912 return (-1);
914 return (1);
915 }
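/*
 * Editor's note (worked example, assuming DEV_BSIZE is 512): one
 * kcore_seg_t plus one cpu_kcore_hdr_t plus VM_PHYSSEG_MAX
 * phys_ram_seg_t descriptors is far smaller than dbtob(1) bytes, so
 * the headers round up to exactly one disk block and cpu_dumpsize()
 * returns 1; -1 would only occur if the headers outgrew a block.
 */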
917 /*
918 * cpu_dump_mempagecnt: calculate the size of RAM (in pages) to be dumped.
919 */
920 u_long
921 cpu_dump_mempagecnt()
922 {
922 {
923 u_long i, n;
925 n = 0;
926 for (i = 0; i < mem_cluster_cnt; i++)
927 n += atop(mem_clusters[i].size);
928 return (n);
929 }
931 /*
932 * cpu_dump: dump the machine-dependent kernel core dump headers.
933 */
934 int
935 cpu_dump()
936 {
936 {
937 int (*dump)(dev_t, daddr_t, caddr_t, size_t);
938 char buf[dbtob(1)];
939 kcore_seg_t *segp;
940 cpu_kcore_hdr_t *cpuhdrp;
941 phys_ram_seg_t *memsegp;
942 const struct bdevsw *bdev;
943 int i;
945 bdev = bdevsw_lookup(dumpdev);
946 if (bdev == NULL)
947 return (ENXIO);
948 dump = bdev->d_dump;
950 memset(buf, 0, sizeof buf);
951 segp = (kcore_seg_t *)buf;
952 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
953 memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) +
954 ALIGN(sizeof(*cpuhdrp))];
956 /*
957 * Generate a segment header.
958 */
959 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
960 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
962 /*
963 * Add the machine-dependent header info.
964 */
965 cpuhdrp->ptdpaddr = PTDpaddr;
966 cpuhdrp->nmemsegs = mem_cluster_cnt;
968 /*
969 * Fill in the memory segment descriptors.
970 */
971 for (i = 0; i < mem_cluster_cnt; i++) {
972 memsegp[i].start = mem_clusters[i].start;
973 memsegp[i].size = mem_clusters[i].size;
974 }
976 return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1)));
977 }
979 /*
980 * This is called by main to set dumplo and dumpsize.
981 * Dumps always skip the first PAGE_SIZE of disk space
982 * in case there might be a disk label stored there.
983 * If there is extra space, put dump at the end to
984 * reduce the chance that swapping trashes it.
985 */
986 void
987 cpu_dumpconf()
988 {
988 {
989 const struct bdevsw *bdev;
990 int nblks, dumpblks; /* size of dump area */
992 if (dumpdev == NODEV)
993 goto bad;
994 bdev = bdevsw_lookup(dumpdev);
995 if (bdev == NULL)
996 panic("dumpconf: bad dumpdev=0x%x", dumpdev);
997 if (bdev->d_psize == NULL)
998 goto bad;
999 nblks = (*bdev->d_psize)(dumpdev);
1000 if (nblks <= ctod(1))
1001 goto bad;
1003 dumpblks = cpu_dumpsize();
1004 if (dumpblks < 0)
1005 goto bad;
1006 dumpblks += ctod(cpu_dump_mempagecnt());
1008 /* If dump won't fit (incl. room for possible label), punt. */
1009 if (dumpblks > (nblks - ctod(1)))
1010 goto bad;
1012 /* Put dump at end of partition */
1013 dumplo = nblks - dumpblks;
1015 /* dumpsize is in page units, and doesn't include headers. */
1016 dumpsize = cpu_dump_mempagecnt();
1017 return;
1019 bad:
1020 dumpsize = 0;
1021 }
1023 /*
1024 * Doadump comes here after turning off memory management and
1025 * getting on the dump stack, either when called above, or by
1026 * the auto-restart code.
1027 */
1028 #define BYTES_PER_DUMP PAGE_SIZE /* must be a multiple of pagesize XXX small */
1029 static vaddr_t dumpspace;
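/*
 * Editor's note: dumpsys() streams all of physical memory through this
 * one BYTES_PER_DUMP-sized window: each pass maps the next physical
 * chunk at dumpspace via pmap_map() and hands that virtual address to
 * the driver's d_dump routine, avoiding any allocation at crash time.
 */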
1031 vaddr_t
1032 reserve_dumppages(vaddr_t p)
1033 {
1035 dumpspace = p;
1036 return (p + BYTES_PER_DUMP);
1037 }
1039 void
1040 dumpsys()
1041 {
1042 u_long totalbytesleft, bytes, i, n, memseg;
1043 u_long maddr;
1044 int psize;
1045 daddr_t blkno;
1046 const struct bdevsw *bdev;
1047 int (*dump)(dev_t, daddr_t, caddr_t, size_t);
1048 int error;
1050 /* Save registers. */
1051 savectx(&dumppcb);
1053 if (dumpdev == NODEV)
1054 return;
1056 bdev = bdevsw_lookup(dumpdev);
1057 if (bdev == NULL || bdev->d_psize == NULL)
1058 return;
1060 /*
1061 * For dumps during autoconfiguration,
1062 * if dump device has already configured...
1063 */
1064 if (dumpsize == 0)
1065 cpu_dumpconf();
1066 if (dumplo <= 0 || dumpsize == 0) {
1067 printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
1068 minor(dumpdev));
1069 return;
1070 }
1071 printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
1072 minor(dumpdev), dumplo);
1074 psize = (*bdev->d_psize)(dumpdev);
1075 printf("dump ");
1076 if (psize == -1) {
1077 printf("area unavailable\n");
1078 return;
1079 }
1081 #if 0 /* XXX this doesn't work. grr. */
1082 /* toss any characters present prior to dump */
1083 while (sget() != NULL); /*syscons and pccons differ */
1084 #endif
1086 if ((error = cpu_dump()) != 0)
1087 goto err;
1089 totalbytesleft = ptoa(cpu_dump_mempagecnt());
1090 blkno = dumplo + cpu_dumpsize();
1091 dump = bdev->d_dump;
1092 error = 0;
1094 for (memseg = 0; memseg < mem_cluster_cnt; memseg++) {
1095 maddr = mem_clusters[memseg].start;
1096 bytes = mem_clusters[memseg].size;
1098 for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
1099 /* Print out how many MBs we have left to go. */
1100 if ((totalbytesleft % (1024*1024)) == 0)
1101 printf("%ld ", totalbytesleft / (1024 * 1024));
1103 /* Limit size for next transfer. */
1104 n = bytes - i;
1105 if (n > BYTES_PER_DUMP)
1106 n = BYTES_PER_DUMP;
1108 (void) pmap_map(dumpspace, maddr, maddr + n,
1109 VM_PROT_READ);
1111 error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
1112 if (error)
1113 goto err;
1114 maddr += n;
1115 blkno += btodb(n); /* XXX? */
1117 #if 0 /* XXX this doesn't work. grr. */
1118 /* operator aborting dump? */
1119 if (sget() != NULL) {
1120 error = EINTR;
1121 break;
1122 }
1123 #endif
1124 }
1125 }
1127 err:
1128 switch (error) {
1130 case ENXIO:
1131 printf("device bad\n");
1132 break;
1134 case EFAULT:
1135 printf("device not ready\n");
1136 break;
1138 case EINVAL:
1139 printf("area improper\n");
1140 break;
1142 case EIO:
1143 printf("i/o error\n");
1144 break;
1146 case EINTR:
1147 printf("aborted from console\n");
1148 break;
1150 case 0:
1151 printf("succeeded\n");
1152 break;
1154 default:
1155 printf("error %d\n", error);
1156 break;
1157 }
1158 printf("\n\n");
1159 delay(5000000); /* 5 seconds */
1160 }
1162 /*
1163 * Clear registers on exec
1164 */
1165 void
1166 setregs(struct lwp *l, struct exec_package *pack, u_long stack)
1167 {
1168 struct pmap *pmap = vm_map_pmap(&l->l_proc->p_vmspace->vm_map);
1169 struct pcb *pcb = &l->l_addr->u_pcb;
1170 struct trapframe *tf;
1172 #if NNPX > 0
1173 /* If we were using the FPU, forget about it. */
1174 if (l->l_addr->u_pcb.pcb_fpcpu != NULL)
1175 npxsave_lwp(l, 0);
1176 #endif
1178 #ifdef USER_LDT
1179 pmap_ldt_cleanup(l);
1180 #endif
1182 l->l_md.md_flags &= ~MDL_USEDFPU;
1183 if (i386_use_fxsave) {
1184 pcb->pcb_savefpu.sv_xmm.sv_env.en_cw = __NetBSD_NPXCW__;
1185 pcb->pcb_savefpu.sv_xmm.sv_env.en_mxcsr = __INITIAL_MXCSR__;
1186 } else
1187 pcb->pcb_savefpu.sv_87.sv_env.en_cw = __NetBSD_NPXCW__;
1189 tf = l->l_md.md_regs;
1190 tf->tf_gs = LSEL(LUDATA_SEL, SEL_UPL);
1191 tf->tf_fs = LSEL(LUDATA_SEL, SEL_UPL);
1192 tf->tf_es = LSEL(LUDATA_SEL, SEL_UPL);
1193 tf->tf_ds = LSEL(LUDATA_SEL, SEL_UPL);
1194 tf->tf_edi = 0;
1195 tf->tf_esi = 0;
1196 tf->tf_ebp = 0;
1197 tf->tf_ebx = (int)l->l_proc->p_psstr;
1198 tf->tf_edx = 0;
1199 tf->tf_ecx = 0;
1200 tf->tf_eax = 0;
1201 tf->tf_eip = pack->ep_entry;
1202 tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
1203 LSEL(LUCODEBIG_SEL, SEL_UPL) : LSEL(LUCODE_SEL, SEL_UPL);
1204 tf->tf_eflags = PSL_USERSET;
1205 tf->tf_esp = stack;
1206 tf->tf_ss = LSEL(LUDATA_SEL, SEL_UPL);
1207 }
1209 /*
1210 * Initialize segments and descriptor tables
1211 */
1213 union descriptor *gdt, *ldt;
1214 struct gate_descriptor *idt;
1215 char idt_allocmap[NIDT];
1216 struct simplelock idt_lock = SIMPLELOCK_INITIALIZER;
1217 #ifdef I586_CPU
1218 union descriptor *pentium_idt;
1219 #endif
1220 extern struct user *proc0paddr;
1222 void
1223 setgate(struct gate_descriptor *gd, void *func, int args, int type, int dpl,
1224 int sel)
1225 {
1227 gd->gd_looffset = (int)func;
1228 gd->gd_selector = sel;
1229 gd->gd_stkcpy = args;
1230 gd->gd_xx = 0;
1231 gd->gd_type = type;
1232 gd->gd_dpl = dpl;
1233 gd->gd_p = 1;
1234 gd->gd_hioffset = (int)func >> 16;
1235 }
1237 void
1238 unsetgate(struct gate_descriptor *gd)
1239 {
1240 gd->gd_p = 0;
1241 gd->gd_hioffset = 0;
1242 gd->gd_looffset = 0;
1243 gd->gd_selector = 0;
1244 gd->gd_xx = 0;
1245 gd->gd_stkcpy = 0;
1246 gd->gd_type = 0;
1247 gd->gd_dpl = 0;
1248 }
1251 void
1252 setregion(struct region_descriptor *rd, void *base, size_t limit)
1253 {
1255 rd->rd_limit = (int)limit;
1256 rd->rd_base = (int)base;
1257 }
1259 void
1260 setsegment(struct segment_descriptor *sd, void *base, size_t limit, int type,
1261 int dpl, int def32, int gran)
1262 {
1264 sd->sd_lolimit = (int)limit;
1265 sd->sd_lobase = (int)base;
1266 sd->sd_type = type;
1267 sd->sd_dpl = dpl;
1268 sd->sd_p = 1;
1269 sd->sd_hilimit = (int)limit >> 16;
1270 sd->sd_xx = 0;
1271 sd->sd_def32 = def32;
1272 sd->sd_gran = gran;
1273 sd->sd_hibase = (int)base >> 24;
1274 }
1276 #define IDTVEC(name) __CONCAT(X, name)
1277 typedef void (vector)(void);
1278 extern vector IDTVEC(syscall);
1279 extern vector IDTVEC(osyscall);
1280 extern vector *IDTVEC(exceptions)[];
1281 #ifdef COMPAT_SVR4
1282 extern vector IDTVEC(svr4_fasttrap);
1283 #endif /* COMPAT_SVR4 */
1284 #ifdef COMPAT_MACH
1285 extern vector IDTVEC(mach_trap);
1286 #endif
1287 #define MAX_XEN_IDT 128
1288 trap_info_t xen_idt[MAX_XEN_IDT];
1289 int xen_idt_idx;
1291 #define KBTOB(x) ((size_t)(x) * 1024UL)
1293 void cpu_init_idt()
1294 {
1295 struct region_descriptor region;
1297 panic("cpu_init_idt");
1298 #ifdef I586_CPU
1299 setregion(&region, pentium_idt, NIDT * sizeof(idt[0]) - 1);
1300 #else
1301 setregion(&region, idt, NIDT * sizeof(idt[0]) - 1);
1302 #endif
1303 lidt(&region);
1304 }
1306 #if !defined(REALBASEMEM) && !defined(REALEXTMEM)
1307 void
1308 add_mem_cluster(u_int64_t seg_start, u_int64_t seg_end, u_int32_t type)
1309 {
1310 extern struct extent *iomem_ex;
1311 int i;
1313 if (seg_end > 0x100000000ULL) {
1314 printf("WARNING: skipping large "
1315 "memory map entry: "
1316 "0x%qx/0x%qx/0x%x\n",
1317 seg_start,
1318 (seg_end - seg_start),
1319 type);
1320 return;
1321 }
1323 /*
1324 * XXX Chop the last page off the size so that
1325 * XXX it can fit in avail_end.
1326 */
1327 if (seg_end == 0x100000000ULL)
1328 seg_end -= PAGE_SIZE;
1330 if (seg_end <= seg_start)
1331 return;
1333 for (i = 0; i < mem_cluster_cnt; i++) {
1334 if ((mem_clusters[i].start == round_page(seg_start))
1335 && (mem_clusters[i].size
1336 == trunc_page(seg_end) - mem_clusters[i].start)) {
1337 #ifdef DEBUG_MEMLOAD
1338 printf("WARNING: skipping duplicate segment entry\n");
1339 #endif
1340 return;
1341 }
1342 }
1344 /*
1345 * Allocate the physical addresses used by RAM
1346 * from the iomem extent map. This is done before
1347 * the addresses are page rounded just to make
1348 * sure we get them all.
1349 */
1350 if (extent_alloc_region(iomem_ex, seg_start,
1351 seg_end - seg_start, EX_NOWAIT)) {
1352 /* XXX What should we do? */
1353 printf("WARNING: CAN'T ALLOCATE "
1354 "MEMORY SEGMENT "
1355 "(0x%qx/0x%qx/0x%x) FROM "
1356 "IOMEM EXTENT MAP!\n",
1357 seg_start, seg_end - seg_start, type);
1358 return;
1359 }
1361 /*
1362 * If it's not free memory, skip it.
1363 */
1364 if (type != BIM_Memory)
1365 return;
1367 /* XXX XXX XXX */
1368 if (mem_cluster_cnt >= VM_PHYSSEG_MAX)
1369 panic("init386: too many memory segments");
1371 seg_start = round_page(seg_start);
1372 seg_end = trunc_page(seg_end);
1374 if (seg_start == seg_end)
1375 return;
1377 mem_clusters[mem_cluster_cnt].start = seg_start;
1378 mem_clusters[mem_cluster_cnt].size =
1379 seg_end - seg_start;
1381 if (avail_end < seg_end)
1382 avail_end = seg_end;
1383 physmem += atop(mem_clusters[mem_cluster_cnt].size);
1384 mem_cluster_cnt++;
1385 }
1386 #endif /* !defined(REALBASEMEM) && !defined(REALEXTMEM) */
1388 void
1389 initgdt()
1390 {
1391 #if !defined(XEN)
1392 struct region_descriptor region;
1393 #else
1394 paddr_t frames[16];
1395 #endif
1397 #if !defined(XEN)
1398 gdt = tgdt;
1399 memset(gdt, 0, NGDT*sizeof(*gdt));
1400 #endif
1401 /* make gdt gates and memory segments */
1402 setsegment(&gdt[GCODE_SEL].sd, 0, 0xfc3ff, SDT_MEMERA, SEL_KPL, 1, 1);
1403 setsegment(&gdt[GDATA_SEL].sd, 0, 0xfc3ff, SDT_MEMRWA, SEL_KPL, 1, 1);
1404 setsegment(&gdt[GUCODE_SEL].sd, 0, x86_btop(I386_MAX_EXE_ADDR) - 1,
1405 SDT_MEMERA, SEL_UPL, 1, 1);
1406 setsegment(&gdt[GUCODEBIG_SEL].sd, 0, x86_btop(VM_MAXUSER_ADDRESS) - 1,
1407 SDT_MEMERA, SEL_UPL, 1, 1);
1408 setsegment(&gdt[GUDATA_SEL].sd, 0, x86_btop(VM_MAXUSER_ADDRESS) - 1,
1409 SDT_MEMRWA, SEL_UPL, 1, 1);
1410 #ifdef COMPAT_MACH
1411 setgate(&gdt[GMACHCALLS_SEL].gd, &IDTVEC(mach_trap), 1,
1412 SDT_SYS386CGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1413 #endif
1414 #if NBIOSCALL > 0
1415 /* bios trampoline GDT entries */
1416 setsegment(&gdt[GBIOSCODE_SEL].sd, 0, 0xfc3ff, SDT_MEMERA, SEL_KPL, 0,
1417 0);
1418 setsegment(&gdt[GBIOSDATA_SEL].sd, 0, 0xfc3ff, SDT_MEMRWA, SEL_KPL, 0,
1419 0);
1420 #endif
1421 setsegment(&gdt[GCPU_SEL].sd, &cpu_info_primary,
1422 sizeof(struct cpu_info)-1, SDT_MEMRWA, SEL_KPL, 1, 1);
1424 #if !defined(XEN)
1425 setregion(&region, gdt, NGDT * sizeof(gdt[0]) - 1);
1426 lgdt(&region);
1427 #else
1428 frames[0] = xpmap_ptom((uint32_t)gdt - KERNBASE) >> PAGE_SHIFT;
1429 /* pmap_kremove((vaddr_t)gdt, PAGE_SIZE); */
1430 pmap_kenter_pa((vaddr_t)gdt, (uint32_t)gdt - KERNBASE,
1431 VM_PROT_READ);
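/*
 * Editor's note: Xen insists that pages holding an installed GDT not
 * be writable by the guest, so the page is remapped VM_PROT_READ
 * before its machine frame number is handed to HYPERVISOR_set_gdt().
 */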
1432 XENPRINTK(("loading gdt %lx, %d entries\n", frames[0] << PAGE_SHIFT,
1433 NGDT));
1434 if (HYPERVISOR_set_gdt(frames, NGDT))
1435 panic("HYPERVISOR_set_gdt failed!\n");
1436 lgdt_finish();
1437 #endif
1438 }
1440 void
1441 init386(paddr_t first_avail)
1442 {
1443 #if !defined(XEN)
1444 union descriptor *tgdt;
1445 #endif
1446 extern void consinit(void);
1447 #if !defined(XEN)
1448 extern struct extent *iomem_ex;
1449 #if !defined(REALBASEMEM) && !defined(REALEXTMEM)
1450 struct btinfo_memmap *bim;
1451 #endif
1452 struct region_descriptor region;
1453 #endif
1454 int x;
1455 #if !defined(XEN)
1456 int first16q;
1457 u_int64_t seg_start, seg_end;
1458 u_int64_t seg_start1, seg_end1;
1459 #endif
1460 paddr_t realmode_reserved_start;
1461 psize_t realmode_reserved_size;
1462 int needs_earlier_install_pte0;
1463 #if NBIOSCALL > 0
1464 extern int biostramp_image_size;
1465 extern u_char biostramp_image[];
1466 #endif
1468 XENPRINTK(("HYPERVISOR_shared_info %p\n", HYPERVISOR_shared_info));
1469 #ifdef XENDEBUG_LOW
1470 xen_dbglow_init();
1471 #endif
1473 cpu_probe_features(&cpu_info_primary);
1474 cpu_feature = cpu_info_primary.ci_feature_flags;
1476 /* not on Xen... */
1477 cpu_feature &= ~(CPUID_PGE|CPUID_PSE|CPUID_MTRR|CPUID_FXSR);
1479 lwp0.l_addr = proc0paddr;
1480 cpu_info_primary.ci_curpcb = &lwp0.l_addr->u_pcb;
1482 XENPRINTK(("proc0paddr %p pcb %p first_avail %p\n",
1483 proc0paddr, cpu_info_primary.ci_curpcb, (void *)first_avail));
1484 XENPRINTK(("ptdpaddr %p atdevbase %p\n", (void *)PTDpaddr,
1485 (void *)atdevbase));
1487 x86_bus_space_init();
1488 consinit(); /* XXX SHOULD NOT BE DONE HERE */
1489 /*
1490 * Initialize PAGE_SIZE-dependent variables.
1491 */
1492 uvm_setpagesize();
1494 /*
1495 * Saving SSE registers won't work if the save area isn't
1496 * 16-byte aligned.
1497 */
1498 if (offsetof(struct user, u_pcb.pcb_savefpu) & 0xf)
1499 panic("init386: pcb_savefpu not 16-byte aligned");
1501 /*
1502 * Start with 2 color bins -- this is just a guess to get us
1503 * started. We'll recolor when we determine the largest cache
1504 * sizes on the system.
1505 */
1506 uvmexp.ncolors = 2;
1508 #if !defined(XEN)
1509 /*
1510 * BIOS leaves data in physical page 0
1511 * Even if it didn't, our VM system doesn't like using zero as a
1512 * physical page number.
1513 * We may also need pages in low memory (one each) for secondary CPU
1514 * startup, for BIOS calls, and for ACPI, plus a page table page to map
1515 * them into the first few pages of the kernel's pmap.
1516 */
1517 avail_start = PAGE_SIZE;
1518 #else
1519 /* Make sure the end of the space used by the kernel is rounded. */
1520 first_avail = round_page(first_avail);
1521 avail_start = first_avail - KERNBASE;
1522 avail_end = ptoa(xen_start_info.nr_pages) +
1523 (KERNTEXTOFF - KERNBASE_LOCORE);
1524 pmap_pa_start = (KERNTEXTOFF - KERNBASE_LOCORE);
1525 pmap_pa_end = avail_end;
1526 mem_clusters[0].start = avail_start;
1527 mem_clusters[0].size = avail_end - avail_start;
1528 mem_cluster_cnt++;
1529 physmem += atop(mem_clusters[0].size);
1530 #endif
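/*
 * Editor's note: a Xen domain sees one contiguous pseudo-physical
 * range of xen_start_info.nr_pages pages, so a single mem_clusters[]
 * entry describes all of RAM; the BIOS memory-map scanning below is
 * compiled only for native (!XEN) kernels.
 */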
1532 /*
1533 * reserve memory for real-mode call
1534 */
1535 needs_earlier_install_pte0 = 0;
1536 realmode_reserved_start = 0;
1537 realmode_reserved_size = 0;
1538 #if NBIOSCALL > 0
1539 /* save us a page for trampoline code */
1540 realmode_reserved_size += PAGE_SIZE;
1541 needs_earlier_install_pte0 = 1;
1542 #endif
1543 #ifdef MULTIPROCESSOR /* XXX */
1544 #if !defined(XEN)
1545 KASSERT(avail_start == PAGE_SIZE); /* XXX */
1546 #endif
1547 if (realmode_reserved_size < MP_TRAMPOLINE) /* XXX */
1548 realmode_reserved_size = MP_TRAMPOLINE; /* XXX */
1549 needs_earlier_install_pte0 = 1; /* XXX */
1550 #endif /* XXX */
1551 #if NACPI > 0
1552 /* trampoline code for wake handler */
1553 realmode_reserved_size += ptoa(acpi_md_get_npages_of_wakecode()+1);
1554 needs_earlier_install_pte0 = 1;
1555 #endif
1556 if (needs_earlier_install_pte0) {
1557 /* page table for directory entry 0 */
1558 realmode_reserved_size += PAGE_SIZE;
1559 }
1560 if (realmode_reserved_size>0) {
1561 realmode_reserved_start = avail_start;
1562 avail_start += realmode_reserved_size;
1563 }
1565 #ifdef DEBUG_MEMLOAD
1566 printf("mem_cluster_count: %d\n", mem_cluster_cnt);
1567 #endif
1569 /*
1570 * Call pmap initialization to make new kernel address space.
1571 * We must do this before loading pages into the VM system.
1572 */
1573 pmap_bootstrap((vaddr_t)atdevbase + IOM_SIZE);
1575 #if !defined(XEN)
1576 #if !defined(REALBASEMEM) && !defined(REALEXTMEM)
1577 /*
1578 * Check to see if we have a memory map from the BIOS (passed
1579 * to us by the boot program).
1580 */
1581 bim = lookup_bootinfo(BTINFO_MEMMAP);
1582 if (bim != NULL && bim->num > 0) {
1583 #ifdef DEBUG_MEMLOAD
1584 printf("BIOS MEMORY MAP (%d ENTRIES):\n", bim->num);
1585 #endif
1586 for (x = 0; x < bim->num; x++) {
1587 #ifdef DEBUG_MEMLOAD
1588 printf(" addr 0x%qx size 0x%qx type 0x%x\n",
1589 bim->entry[x].addr,
1590 bim->entry[x].size,
1591 bim->entry[x].type);
1592 #endif
1594 /*
1595 * If the segment is not memory, skip it.
1596 */
1597 switch (bim->entry[x].type) {
1598 case BIM_Memory:
1599 case BIM_ACPI:
1600 case BIM_NVS:
1601 break;
1602 default:
1603 continue;
1604 }
1606 /*
1607 * Sanity check the entry.
1608 * XXX Need to handle uint64_t in extent code
1609 * XXX and 64-bit physical addresses in i386
1610 * XXX port.
1611 */
1612 seg_start = bim->entry[x].addr;
1613 seg_end = bim->entry[x].addr + bim->entry[x].size;
1615 /*
1616 * Avoid Compatibility Holes.
1617 * XXX Holes within memory space that allow access
1618 * XXX to be directed to the PC-compatible frame buffer
1619 * XXX (0xa0000-0xbffff),to adapter ROM space
1620 * XXX (0xc0000-0xdffff), and to system BIOS space
1621 * XXX (0xe0000-0xfffff).
1622 * XXX Some laptop(for example,Toshiba Satellite2550X)
1623 * XXX report this area and occurred problems,
1624 * XXX so we avoid this area.
1625 */
1626 if (seg_start < 0x100000 && seg_end > 0xa0000) {
1627 printf("WARNING: memory map entry overlaps "
1628 "with ``Compatibility Holes'': "
1629 "0x%qx/0x%qx/0x%x\n", seg_start,
1630 seg_end - seg_start, bim->entry[x].type);
1631 add_mem_cluster(seg_start, 0xa0000,
1632 bim->entry[x].type);
1633 add_mem_cluster(0x100000, seg_end,
1634 bim->entry[x].type);
1635 } else
1636 add_mem_cluster(seg_start, seg_end,
1637 bim->entry[x].type);
1638 }
1639 }
1640 #endif /* ! REALBASEMEM && ! REALEXTMEM */
1641 /*
1642 * If the loop above didn't find any valid segment, fall back to
1643 * former code.
1644 */
1645 if (mem_cluster_cnt == 0) {
1646 /*
1647 * Allocate the physical addresses used by RAM from the iomem
1648 * extent map. This is done before the addresses are
1649 * page rounded just to make sure we get them all.
1650 */
1651 if (extent_alloc_region(iomem_ex, 0, KBTOB(biosbasemem),
1652 EX_NOWAIT)) {
1653 /* XXX What should we do? */
1654 printf("WARNING: CAN'T ALLOCATE BASE MEMORY FROM "
1655 "IOMEM EXTENT MAP!\n");
1656 }
1657 mem_clusters[0].start = 0;
1658 mem_clusters[0].size = trunc_page(KBTOB(biosbasemem));
1659 physmem += atop(mem_clusters[0].size);
1660 if (extent_alloc_region(iomem_ex, IOM_END, KBTOB(biosextmem),
1661 EX_NOWAIT)) {
1662 /* XXX What should we do? */
1663 printf("WARNING: CAN'T ALLOCATE EXTENDED MEMORY FROM "
1664 "IOMEM EXTENT MAP!\n");
1665 }
1666 #if NISADMA > 0
1667 /*
1668 * Some motherboards/BIOSes remap the 384K of RAM that would
1669 * normally be covered by the ISA hole to the end of memory
1670 * so that it can be used. However, on a 16M system, this
1671 * would cause bounce buffers to be allocated and used.
1672 * This is not desirable behaviour, as more than 384K of
1673 * bounce buffers might be allocated. As a work-around,
1674 * we round memory down to the nearest 1M boundary if
1675 * we're using any isadma devices and the remapped memory
1676 * is what puts us over 16M.
1677 */
1678 if (biosextmem > (15*1024) && biosextmem < (16*1024)) {
1679 char pbuf[9];
1681 format_bytes(pbuf, sizeof(pbuf),
1682 biosextmem - (15*1024));
1683 printf("Warning: ignoring %s of remapped memory\n",
1684 pbuf);
1685 biosextmem = (15*1024);
1686 }
1687 #endif
1688 mem_clusters[1].start = IOM_END;
1689 mem_clusters[1].size = trunc_page(KBTOB(biosextmem));
1690 physmem += atop(mem_clusters[1].size);
1692 mem_cluster_cnt = 2;
1694 avail_end = IOM_END + trunc_page(KBTOB(biosextmem));
1695 }
1696 /*
1697 * If we have 16M of RAM or less, just put it all on
1698 * the default free list. Otherwise, put the first
1699 * 16M of RAM on a lower priority free list (so that
1700 * all of the ISA DMA'able memory won't be eaten up
1701 * first-off).
1702 */
1703 if (avail_end <= (16 * 1024 * 1024))
1704 first16q = VM_FREELIST_DEFAULT;
1705 else
1706 first16q = VM_FREELIST_FIRST16;
1708 /* Make sure the end of the space used by the kernel is rounded. */
1709 first_avail = round_page(first_avail);
1710 #endif
1712 XENPRINTK(("load the memory cluster %p(%d) - %p(%ld)\n",
1713 (void *)avail_start, (int)atop(avail_start),
1714 (void *)avail_end, (int)atop(avail_end)));
1715 uvm_page_physload(atop(avail_start), atop(avail_end),
1716 atop(avail_start), atop(avail_end),
1717 VM_FREELIST_DEFAULT);
1719 #if !defined(XEN)
1721 /*
1722 * Now, load the memory clusters (which have already been
1723 * rounded and truncated) into the VM system.
1725 * NOTE: WE ASSUME THAT MEMORY STARTS AT 0 AND THAT THE KERNEL
1726 * IS LOADED AT IOM_END (1M).
1727 */
1728 for (x = 0; x < mem_cluster_cnt; x++) {
1729 seg_start = mem_clusters[x].start;
1730 seg_end = mem_clusters[x].start + mem_clusters[x].size;
1731 seg_start1 = 0;
1732 seg_end1 = 0;
1734 /*
1735 * Skip memory before our available starting point.
1736 */
1737 if (seg_end <= avail_start)
1738 continue;
1740 if (avail_start >= seg_start && avail_start < seg_end) {
1741 if (seg_start != 0)
1742 panic("init386: memory doesn't start at 0");
1743 seg_start = avail_start;
1744 if (seg_start == seg_end)
1745 continue;
1746 }
1748 /*
1749 * If this segment contains the kernel, split it
1750 * in two, around the kernel.
1751 */
1752 if (seg_start <= IOM_END && first_avail <= seg_end) {
1753 seg_start1 = first_avail;
1754 seg_end1 = seg_end;
1755 seg_end = IOM_END;
1756 }
1758 /* First hunk */
1759 if (seg_start != seg_end) {
1760 if (seg_start < (16 * 1024 * 1024) &&
1761 first16q != VM_FREELIST_DEFAULT) {
1762 u_int64_t tmp;
1764 if (seg_end > (16 * 1024 * 1024))
1765 tmp = (16 * 1024 * 1024);
1766 else
1767 tmp = seg_end;
1769 if (tmp != seg_start) {
1770 #ifdef DEBUG_MEMLOAD
1771 printf("loading 0x%qx-0x%qx "
1772 "(0x%lx-0x%lx)\n",
1773 seg_start, tmp,
1774 atop(seg_start), atop(tmp));
1775 #endif
1776 uvm_page_physload(atop(seg_start),
1777 atop(tmp), atop(seg_start),
1778 atop(tmp), first16q);
1779 }
1780 seg_start = tmp;
1781 }
1783 if (seg_start != seg_end) {
1784 #ifdef DEBUG_MEMLOAD
1785 printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
1786 seg_start, seg_end,
1787 atop(seg_start), atop(seg_end));
1788 #endif
1789 uvm_page_physload(atop(seg_start),
1790 atop(seg_end), atop(seg_start),
1791 atop(seg_end), VM_FREELIST_DEFAULT);
1792 }
1793 }
1795 /* Second hunk */
1796 if (seg_start1 != seg_end1) {
1797 if (seg_start1 < (16 * 1024 * 1024) &&
1798 first16q != VM_FREELIST_DEFAULT) {
1799 u_int64_t tmp;
1801 if (seg_end1 > (16 * 1024 * 1024))
1802 tmp = (16 * 1024 * 1024);
1803 else
1804 tmp = seg_end1;
1806 if (tmp != seg_start1) {
1807 #ifdef DEBUG_MEMLOAD
1808 printf("loading 0x%qx-0x%qx "
1809 "(0x%lx-0x%lx)\n",
1810 seg_start1, tmp,
1811 atop(seg_start1), atop(tmp));
1812 #endif
1813 uvm_page_physload(atop(seg_start1),
1814 atop(tmp), atop(seg_start1),
1815 atop(tmp), first16q);
1816 }
1817 seg_start1 = tmp;
1818 }
1820 if (seg_start1 != seg_end1) {
1821 #ifdef DEBUG_MEMLOAD
1822 printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
1823 seg_start1, seg_end1,
1824 atop(seg_start1), atop(seg_end1));
1825 #endif
1826 uvm_page_physload(atop(seg_start1),
1827 atop(seg_end1), atop(seg_start1),
1828 atop(seg_end1), VM_FREELIST_DEFAULT);
1829 }
1830 }
1831 }
1832 #endif
1834 /*
1835 * Steal memory for the message buffer (at end of core).
1836 */
1837 {
1838 struct vm_physseg *vps;
1839 psize_t sz = round_page(MSGBUFSIZE);
1840 psize_t reqsz = sz;
1842 for (x = 0; x < vm_nphysseg; x++) {
1843 vps = &vm_physmem[x];
1844 if (ptoa(vps->avail_end) == avail_end)
1845 goto found;
1846 }
1847 panic("init386: can't find end of memory");
1849 found:
1850 /* Shrink so it'll fit in the last segment. */
1851 if ((vps->avail_end - vps->avail_start) < atop(sz))
1852 sz = ptoa(vps->avail_end - vps->avail_start);
1854 vps->avail_end -= atop(sz);
1855 vps->end -= atop(sz);
1856 msgbuf_paddr = ptoa(vps->avail_end);
1858 /* Remove the last segment if it now has no pages. */
1859 if (vps->start == vps->end) {
1860 for (vm_nphysseg--; x < vm_nphysseg; x++)
1861 vm_physmem[x] = vm_physmem[x + 1];
1862 }
1864 /* Now find where the new avail_end is. */
1865 for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
1866 if (vm_physmem[x].avail_end > avail_end)
1867 avail_end = vm_physmem[x].avail_end;
1868 avail_end = ptoa(avail_end);
1870 /* Warn if the message buffer had to be shrunk. */
1871 if (sz != reqsz)
1872 printf("WARNING: %ld bytes not available for msgbuf "
1873 "in last cluster (%ld used)\n", reqsz, sz);
1874 }
1876 /*
1877 * install PT page for the first 4M if needed.
1878 */
1879 if (needs_earlier_install_pte0) {
1880 paddr_t paddr;
1881 #ifdef DIAGNOSTIC
1882 if (realmode_reserved_size < PAGE_SIZE) {
1883 panic("cannot steal memory for first 4M PT page.");
1884 }
1885 #endif
1886 paddr=realmode_reserved_start+realmode_reserved_size-PAGE_SIZE;
1887 pmap_enter(pmap_kernel(), (vaddr_t)vtopte(0), paddr,
1888 VM_PROT_READ|VM_PROT_WRITE,
1889 PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE);
1890 pmap_update(pmap_kernel());
1891 /* make sure it is clean before using */
1892 memset(vtopte(0), 0, PAGE_SIZE);
1893 realmode_reserved_size -= PAGE_SIZE;
1894 }
1896 #if NBIOSCALL > 0
1897 /*
1898 * this should be caught at kernel build time, but put it here
1899 * in case someone tries to fake it out...
1900 */
1901 #ifdef DIAGNOSTIC
1902 if (realmode_reserved_start > BIOSTRAMP_BASE ||
1903 (realmode_reserved_start+realmode_reserved_size) < (BIOSTRAMP_BASE+
1904 PAGE_SIZE)) {
1905 panic("cannot steal memory for PT page of bioscall.");
1906 }
1907 if (biostramp_image_size > PAGE_SIZE)
1908 panic("biostramp_image_size too big: %x vs. %x",
1909 biostramp_image_size, PAGE_SIZE);
1910 #endif
1911 pmap_kenter_pa((vaddr_t)BIOSTRAMP_BASE, /* virtual */
1912 (paddr_t)BIOSTRAMP_BASE, /* physical */
1913 VM_PROT_ALL); /* protection */
1914 pmap_update(pmap_kernel());
1915 memcpy((caddr_t)BIOSTRAMP_BASE, biostramp_image, biostramp_image_size);
1916 #ifdef DEBUG_BIOSCALL
1917 printf("biostramp installed @ %x\n", BIOSTRAMP_BASE);
1918 #endif
1919 realmode_reserved_size -= PAGE_SIZE;
1920 realmode_reserved_start += PAGE_SIZE;
1921 #endif
1923 #if NACPI > 0
1924 /*
1925 * Steal memory for the acpi wake code
1926 */
1927 {
1928 paddr_t paddr, p;
1929 psize_t sz;
1930 int npg;
1932 paddr = realmode_reserved_start;
1933 npg = acpi_md_get_npages_of_wakecode();
1934 sz = ptoa(npg);
1935 #ifdef DIAGNOSTIC
1936 if (realmode_reserved_size < sz) {
1937 panic("cannot steal memory for ACPI wake code.");
1938 }
1939 #endif
1941 /* identical mapping */
1942 p = paddr;
1943 for (x=0; x<npg; x++) {
1944 printf("kenter: 0x%08X\n", (unsigned)p);
1945 pmap_kenter_pa((vaddr_t)p, p, VM_PROT_ALL);
1946 p += PAGE_SIZE;
1947 }
1948 pmap_update(pmap_kernel());
1950 acpi_md_install_wakecode(paddr);
1952 realmode_reserved_size -= sz;
1953 realmode_reserved_start += sz;
1954 }
1955 #endif
1957 pmap_enter(pmap_kernel(), idt_vaddr, idt_paddr,
1958 VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE);
1959 pmap_update(pmap_kernel());
1960 memset((void *)idt_vaddr, 0, PAGE_SIZE);
1962 #if !defined(XEN)
1963 idt = (struct gate_descriptor *)idt_vaddr;
1964 #ifdef I586_CPU
1965 pmap_enter(pmap_kernel(), pentium_idt_vaddr, idt_paddr,
1966 VM_PROT_READ, PMAP_WIRED|VM_PROT_READ);
1967 pentium_idt = (union descriptor *)pentium_idt_vaddr;
1968 #endif
1969 #endif
1970 pmap_update(pmap_kernel());
1972 initgdt();
1974 HYPERVISOR_set_callbacks(
1975 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)hypervisor_callback,
1976 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
1978 #if !defined(XEN)
1979 tgdt = gdt;
1980 gdt = (union descriptor *)
1981 ((char *)idt + NIDT * sizeof (struct gate_descriptor));
1982 ldt = gdt + NGDT;
1984 memcpy(gdt, tgdt, NGDT*sizeof(*gdt));
1986 setsegment(&gdt[GLDT_SEL].sd, ldt, NLDT * sizeof(ldt[0]) - 1,
1987 SDT_SYSLDT, SEL_KPL, 0, 0);
1988 #else
1989 ldt = (union descriptor *)idt_vaddr;
1990 #endif
1992 /* make ldt gates and memory segments */
1993 setgate(&ldt[LSYS5CALLS_SEL].gd, &IDTVEC(osyscall), 1,
1994 SDT_SYS386CGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1996 ldt[LUCODE_SEL] = gdt[GUCODE_SEL];
1997 ldt[LUCODEBIG_SEL] = gdt[GUCODEBIG_SEL];
1998 ldt[LUDATA_SEL] = gdt[GUDATA_SEL];
1999 ldt[LSOL26CALLS_SEL] = ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2001 #if !defined(XEN)
2002 /* exceptions */
2003 for (x = 0; x < 32; x++) {
2004 setgate(&idt[x], IDTVEC(exceptions)[x], 0, SDT_SYS386TGT,
2005 (x == 3 || x == 4) ? SEL_UPL : SEL_KPL,
2006 GSEL(GCODE_SEL, SEL_KPL));
2007 idt_allocmap[x] = 1;
2008 }
2010 /* new-style interrupt gate for syscalls */
2011 setgate(&idt[128], &IDTVEC(syscall), 0, SDT_SYS386TGT, SEL_UPL,
2012 GSEL(GCODE_SEL, SEL_KPL));
2013 idt_allocmap[128] = 1;
2014 #ifdef COMPAT_SVR4
2015 setgate(&idt[0xd2], &IDTVEC(svr4_fasttrap), 0, SDT_SYS386TGT,
2016 SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
2017 idt_allocmap[0xd2] = 1;
2018 #endif /* COMPAT_SVR4 */
2019 #endif
2021 memset(xen_idt, 0, sizeof(trap_info_t) * MAX_XEN_IDT);
2022 xen_idt_idx = 0;
2023 for (x = 0; x < 32; x++) {
2024 KASSERT(xen_idt_idx < MAX_XEN_IDT);
2025 xen_idt[xen_idt_idx].vector = x;
2026 xen_idt[xen_idt_idx].flags =
2027 (x == 3 || x == 4) ? SEL_UPL : SEL_XEN;
2028 xen_idt[xen_idt_idx].cs = GSEL(GCODE_SEL, SEL_KPL);
2029 xen_idt[xen_idt_idx].address =
2030 (uint32_t)IDTVEC(exceptions)[x];
2031 xen_idt_idx++;
2032 }
2033 KASSERT(xen_idt_idx < MAX_XEN_IDT);
2034 xen_idt[xen_idt_idx].vector = 128;
2035 xen_idt[xen_idt_idx].flags = SEL_UPL;
2036 xen_idt[xen_idt_idx].cs = GSEL(GCODE_SEL, SEL_KPL);
2037 xen_idt[xen_idt_idx].address = (uint32_t)&IDTVEC(syscall);
2038 xen_idt_idx++;
2039 #ifdef COMPAT_SVR4
2040 KASSERT(xen_idt_idx < MAX_XEN_IDT);
2041 xen_idt[xen_idt_idx].vector = 0xd2;
2042 xen_idt[xen_idt_idx].flags = SEL_UPL;
2043 xen_idt[xen_idt_idx].cs = GSEL(GCODE_SEL, SEL_KPL);
2044 xen_idt[xen_idt_idx].address = (uint32_t)&IDTVEC(svr4_fasttrap);
2045 xen_idt_idx++;
2046 #endif /* COMPAT_SVR4 */
2048 #if !defined(XEN)
2049 setregion(&region, gdt, NGDT * sizeof(gdt[0]) - 1);
2050 lgdt(&region);
2051 #else
2052 lldt(GSEL(GLDT_SEL, SEL_KPL));
2053 #endif
2055 #if !defined(XEN)
2056 cpu_init_idt();
2057 #else
2058 db_trap_callback = ddb_trap_hook;
2060 XENPRINTF(("HYPERVISOR_set_trap_table %p\n", xen_idt));
2061 if (HYPERVISOR_set_trap_table(xen_idt))
2062 panic("HYPERVISOR_set_trap_table %p failed\n", xen_idt);
2063 #endif
2065 #if NKSYMS || defined(DDB) || defined(LKM)
2066 {
2067 extern int end;
2068 struct btinfo_symtab *symtab;
2070 #ifdef DDB
2071 db_machine_init();
2072 #endif
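/*
 * The bootloader's symbol-table pointers are rebased by KERNBASE
 * before being handed to ksyms; failing that, the symbols are
 * assumed to sit at the kernel image's end (or, on Xen, at the
 * module start / MFN list passed in xen_start_info).
 */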
2074 symtab = lookup_bootinfo(BTINFO_SYMTAB);
2076 if (symtab) {
2077 symtab->ssym += KERNBASE;
2078 symtab->esym += KERNBASE;
2079 ksyms_init(symtab->nsym, (int *)symtab->ssym,
2080 (int *)symtab->esym);
2081 }
2082 else
2083 ksyms_init(*(int *)&end, ((int *)&end) + 1,
2084 xen_start_info.mod_start ?
2085 (void *)xen_start_info.mod_start :
2086 (void *)xen_start_info.mfn_list);
2087 }
2088 #endif
2089 #ifdef DDB
2090 if (boothowto & RB_KDB)
2091 Debugger();
2092 #endif
2093 #ifdef IPKDB
2094 ipkdb_init();
2095 if (boothowto & RB_KDB)
2096 ipkdb_connect(0);
2097 #endif
2098 #ifdef KGDB
2099 kgdb_port_init();
2100 if (boothowto & RB_KDB) {
2101 kgdb_debug_init = 1;
2102 kgdb_connect(1);
2103 }
2104 #endif
2106 #if NMCA > 0
2107 /* Check for MCA bus; this has to happen before the ISA setup,
2108 * since if MCA is detected, ISA must default to level-triggered
2109 * interrupts */
2110 mca_busprobe();
2111 #endif
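/*
 * A Xen guest takes interrupts as event-channel upcalls rather than
 * through the PIC, hence the different default setup here.
 */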
2113 #if defined(XEN)
2114 events_default_setup();
2115 #else
2116 intr_default_setup();
2117 #endif
2119 /* Initialize software interrupts. */
2120 softintr_init();
2122 splraise(IPL_IPI);
2123 enable_intr();
2125 if (physmem < btoc(2 * 1024 * 1024)) {
2126 printf("warning: too little memory available; "
2127 "have %lu bytes, want %lu bytes\n"
2128 "running in degraded mode\n"
2129 "press a key to confirm\n\n",
2130 ptoa(physmem), 2*1024*1024UL);
2131 cngetc();
2132 }
2134 #ifdef __HAVE_CPU_MAXPROC
2135 /* Make sure maxproc is sane */
2136 if (maxproc > cpu_maxproc())
2137 maxproc = cpu_maxproc();
2138 #endif
2141 #ifdef COMPAT_NOMID
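/*
 * exec_nomid():
 * Handle old a.out images whose header carries no machine ID,
 * mapping each recognized mid/magic pair to the matching
 * 386BSD/BSDI prep routine below.
 */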
2142 static int
2143 exec_nomid(struct proc *p, struct exec_package *epp)
2144 {
2145 int error;
2146 u_long midmag, magic;
2147 u_short mid;
2148 struct exec *execp = epp->ep_hdr;
2150 /* check on validity of epp->ep_hdr performed by exec_out_makecmds */
2152 midmag = ntohl(execp->a_midmag);
2153 mid = (midmag >> 16) & 0xffff;
2154 magic = midmag & 0xffff;
2156 if (magic == 0) {
2157 magic = (execp->a_midmag & 0xffff);
2158 mid = MID_ZERO;
2159 }
2161 midmag = mid << 16 | magic;
2163 switch (midmag) {
2164 case (MID_ZERO << 16) | ZMAGIC:
2165 /*
2166 * 386BSD's ZMAGIC format:
2167 */
2168 error = exec_aout_prep_oldzmagic(p, epp);
2169 break;
2171 case (MID_ZERO << 16) | QMAGIC:
2172 /*
2173 * BSDI's QMAGIC format:
2174 * same as new ZMAGIC format, but with different magic number
2175 */
2176 error = exec_aout_prep_zmagic(p, epp);
2177 break;
2179 case (MID_ZERO << 16) | NMAGIC:
2180 /*
2181 * BSDI's NMAGIC format:
2182 * same as the standard NMAGIC format, but with a different
2183 * magic number and with text starting at 0.
2184 */
2185 error = exec_aout_prep_oldnmagic(p, epp);
2186 break;
2188 case (MID_ZERO << 16) | OMAGIC:
2189 /*
2190 * BSDI's OMAGIC format:
2191 * same as the standard OMAGIC format, but with a different
2192 * magic number and with text starting at 0.
2193 */
2194 error = exec_aout_prep_oldomagic(p, epp);
2195 break;
2197 default:
2198 error = ENOEXEC;
2199 }
2201 return error;
2202 }
2203 #endif
2205 /*
2206 * cpu_exec_aout_makecmds():
2207 * CPU-dependent a.out format hook for execve().
2208 *
2209 * Determine if the given exec package refers to something which we
2210 * understand and, if so, set up the vmcmds for it.
2211 *
2212 * On the i386, old (386bsd) ZMAGIC binaries and BSDI QMAGIC binaries
2213 * are accepted if COMPAT_NOMID is given as a kernel option.
2214 */
2215 int
2216 cpu_exec_aout_makecmds(struct proc *p, struct exec_package *epp)
2217 {
2218 int error = ENOEXEC;
2220 #ifdef COMPAT_NOMID
2221 if ((error = exec_nomid(p, epp)) == 0)
2222 return error;
2223 #endif /* COMPAT_NOMID */
2225 return error;
2226 }
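/*
 * lookup_bootinfo():
 * Walk the bootinfo area, an int record count followed by
 * variable-length btinfo_common records, and return the first
 * record of the requested type, or NULL if none is present.
 */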
2228 void *
2229 lookup_bootinfo(int type)
2230 {
2231 struct btinfo_common *help;
2232 int n = *(int *)bootinfo;
2233 help = (struct btinfo_common *)(bootinfo + sizeof(int));
2234 while (n--) {
2235 if (help->type == type)
2236 return (help);
2237 help = (struct btinfo_common *)((char *)help + help->len);
2238 }
2239 return (0);
2240 }
2242 #include <dev/ic/mc146818reg.h> /* for NVRAM POST */
2243 #include <i386/isa/nvram.h> /* for NVRAM POST */
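/*
 * The native reset paths (NVRAM reset byte, keyboard-controller
 * pulse) are retained below under #if 0; on Xen the domain simply
 * asks the hypervisor to reboot it.
 */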
2245 void
2246 cpu_reset()
2247 {
2249 disable_intr();
2251 #if 0
2252 /*
2253 * Ensure the NVRAM reset byte contains something vaguely sane.
2254 */
2256 outb(IO_RTC, NVRAM_RESET);
2257 outb(IO_RTC+1, NVRAM_RESET_RST);
2259 /*
2260 * The keyboard controller has 4 random output pins, one of which is
2261 * connected to the RESET pin on the CPU in many PCs. We tell the
2262 * keyboard controller to pulse this line a couple of times.
2263 */
2264 outb(IO_KBD + KBCMDP, KBC_PULSE0);
2265 delay(100000);
2266 outb(IO_KBD + KBCMDP, KBC_PULSE0);
2267 delay(100000);
2268 #endif
2270 HYPERVISOR_reboot();
2272 for (;;);
2273 }
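/*
 * cpu_getmcontext():
 * Dump lwp l's register state into mcp.  If the interrupted PC sits
 * inside a registered restartable atomic sequence, the saved EIP is
 * wound back to the sequence start (ras_lookup).
 */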
2275 void
2276 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
2277 {
2278 const struct trapframe *tf = l->l_md.md_regs;
2279 __greg_t *gr = mcp->__gregs;
2280 __greg_t ras_eip;
2282 /* Save register context. */
2283 #ifdef VM86
2284 if (tf->tf_eflags & PSL_VM) {
2285 gr[_REG_GS] = tf->tf_vm86_gs;
2286 gr[_REG_FS] = tf->tf_vm86_fs;
2287 gr[_REG_ES] = tf->tf_vm86_es;
2288 gr[_REG_DS] = tf->tf_vm86_ds;
2289 gr[_REG_EFL] = get_vflags(l);
2290 } else
2291 #endif
2292 {
2293 gr[_REG_GS] = tf->tf_gs;
2294 gr[_REG_FS] = tf->tf_fs;
2295 gr[_REG_ES] = tf->tf_es;
2296 gr[_REG_DS] = tf->tf_ds;
2297 gr[_REG_EFL] = tf->tf_eflags;
2298 }
2299 gr[_REG_EDI] = tf->tf_edi;
2300 gr[_REG_ESI] = tf->tf_esi;
2301 gr[_REG_EBP] = tf->tf_ebp;
2302 gr[_REG_EBX] = tf->tf_ebx;
2303 gr[_REG_EDX] = tf->tf_edx;
2304 gr[_REG_ECX] = tf->tf_ecx;
2305 gr[_REG_EAX] = tf->tf_eax;
2306 gr[_REG_EIP] = tf->tf_eip;
2307 gr[_REG_CS] = tf->tf_cs;
2308 gr[_REG_ESP] = tf->tf_esp;
2309 gr[_REG_UESP] = tf->tf_esp;
2310 gr[_REG_SS] = tf->tf_ss;
2311 gr[_REG_TRAPNO] = tf->tf_trapno;
2312 gr[_REG_ERR] = tf->tf_err;
2314 if ((ras_eip = (__greg_t)ras_lookup(l->l_proc,
2315 (caddr_t) gr[_REG_EIP])) != -1)
2316 gr[_REG_EIP] = ras_eip;
2318 *flags |= _UC_CPU;
2320 /* Save floating point register context, if any. */
2321 if ((l->l_md.md_flags & MDL_USEDFPU) != 0) {
2322 #if NNPX > 0
2323 /*
2324 * If this process is the current FP owner, dump its
2325 * context to the PCB first.
2326 * XXX npxsave() also clears the FPU state; depending on the
2327 * XXX application this might be a penalty.
2328 */
2329 if (l->l_addr->u_pcb.pcb_fpcpu) {
2330 npxsave_lwp(l, 1);
2331 }
2332 #endif
2333 if (i386_use_fxsave) {
2334 memcpy(&mcp->__fpregs.__fp_reg_set.__fp_xmm_state.__fp_xmm,
2335 &l->l_addr->u_pcb.pcb_savefpu.sv_xmm,
2336 sizeof (mcp->__fpregs.__fp_reg_set.__fp_xmm_state.__fp_xmm));
2337 *flags |= _UC_FXSAVE;
2338 } else {
2339 memcpy(&mcp->__fpregs.__fp_reg_set.__fpchip_state.__fp_state,
2340 &l->l_addr->u_pcb.pcb_savefpu.sv_87,
2341 sizeof (mcp->__fpregs.__fp_reg_set.__fpchip_state.__fp_state));
2342 }
2343 #if 0
2344 /* Apparently nothing ever touches this. */
2345 ucp->mcp.mc_fp.fp_emcsts = l->l_addr->u_pcb.pcb_saveemc;
2346 #endif
2347 *flags |= _UC_FPU;
2348 }
2349 }
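/*
 * cpu_setmcontext():
 * Install a (possibly user-supplied) context.  The PSL_USERSTATIC /
 * USERMODE checks below reject any frame that could elevate
 * privilege; segment-register validity is left to the CPU, which
 * faults on a bad value when the frame is restored.
 */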
2351 int
2352 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
2353 {
2354 struct trapframe *tf = l->l_md.md_regs;
2355 const __greg_t *gr = mcp->__gregs;
2357 /* Restore register context, if any. */
2358 if ((flags & _UC_CPU) != 0) {
2359 #ifdef VM86
2360 if (gr[_REG_EFL] & PSL_VM) {
2361 tf->tf_vm86_gs = gr[_REG_GS];
2362 tf->tf_vm86_fs = gr[_REG_FS];
2363 tf->tf_vm86_es = gr[_REG_ES];
2364 tf->tf_vm86_ds = gr[_REG_DS];
2365 set_vflags(l, gr[_REG_EFL]);
2366 if (flags & _UC_VM) {
2367 void syscall_vm86(struct trapframe *);
2368 l->l_proc->p_md.md_syscall = syscall_vm86;
2369 }
2370 } else
2371 #endif
2372 {
2373 /*
2374 * Check for security violations. If we're returning
2375 * to protected mode, the CPU will validate the segment
2376 * registers automatically and generate a trap on
2377 * violations. We handle the trap, rather than doing
2378 * all of the checking here.
2379 */
2380 if (((gr[_REG_EFL] ^ tf->tf_eflags) & PSL_USERSTATIC) ||
2381 !USERMODE(gr[_REG_CS], gr[_REG_EFL])) {
2382 printf("cpu_setmcontext error: uc EFL: 0x%08x"
2383 " tf EFL: 0x%08x uc CS: 0x%x\n",
2384 gr[_REG_EFL], tf->tf_eflags, gr[_REG_CS]);
2385 return (EINVAL);
2386 }
2387 tf->tf_gs = gr[_REG_GS];
2388 tf->tf_fs = gr[_REG_FS];
2389 tf->tf_es = gr[_REG_ES];
2390 tf->tf_ds = gr[_REG_DS];
2391 /* Only change the user-alterable part of eflags */
2392 tf->tf_eflags &= ~PSL_USER;
2393 tf->tf_eflags |= (gr[_REG_EFL] & PSL_USER);
2395 tf->tf_edi = gr[_REG_EDI];
2396 tf->tf_esi = gr[_REG_ESI];
2397 tf->tf_ebp = gr[_REG_EBP];
2398 tf->tf_ebx = gr[_REG_EBX];
2399 tf->tf_edx = gr[_REG_EDX];
2400 tf->tf_ecx = gr[_REG_ECX];
2401 tf->tf_eax = gr[_REG_EAX];
2402 tf->tf_eip = gr[_REG_EIP];
2403 tf->tf_cs = gr[_REG_CS];
2404 tf->tf_esp = gr[_REG_UESP];
2405 tf->tf_ss = gr[_REG_SS];
2406 }
2407 }
2408 /* Restore floating point register context, if any. */
2409 if ((flags & _UC_FPU) != 0) {
2410 #if NNPX > 0
2411 /*
2412 * If we were using the FPU, forget that we were.
2413 */
2414 if (l->l_addr->u_pcb.pcb_fpcpu != NULL)
2415 npxsave_lwp(l, 0);
2416 #endif
2417 if (flags & _UC_FXSAVE) {
2418 if (i386_use_fxsave) {
2419 memcpy(
2420 &l->l_addr->u_pcb.pcb_savefpu.sv_xmm,
2421 &mcp->__fpregs.__fp_reg_set.__fp_xmm_state.__fp_xmm,
2422 sizeof (l->l_addr->u_pcb.pcb_savefpu.sv_xmm));
2423 } else {
2424 /* This is a weird corner case */
2425 process_xmm_to_s87((struct savexmm *)
2426 &mcp->__fpregs.__fp_reg_set.__fp_xmm_state.__fp_xmm,
2427 &l->l_addr->u_pcb.pcb_savefpu.sv_87);
2428 }
2429 } else {
2430 if (i386_use_fxsave) {
2431 process_s87_to_xmm((struct save87 *)
2432 &mcp->__fpregs.__fp_reg_set.__fpchip_state.__fp_state,
2433 &l->l_addr->u_pcb.pcb_savefpu.sv_xmm);
2434 } else {
2435 memcpy(&l->l_addr->u_pcb.pcb_savefpu.sv_87,
2436 &mcp->__fpregs.__fp_reg_set.__fpchip_state.__fp_state,
2437 sizeof (l->l_addr->u_pcb.pcb_savefpu.sv_87));
2438 }
2439 }
2440 /* If not set already. */
2441 l->l_md.md_flags |= MDL_USEDFPU;
2442 #if 0
2443 /* Apparently unused. */
2444 l->l_addr->u_pcb.pcb_saveemc = mcp->mc_fp.fp_emcsts;
2445 #endif
2446 }
2447 if (flags & _UC_SETSTACK)
2448 l->l_proc->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
2449 if (flags & _UC_CLRSTACK)
2450 l->l_proc->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;
2451 return (0);
2452 }
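/*
 * cpu_initclocks() indirects through initclock_func, presumably so
 * the Xen and native ports can install different clock interrupt
 * setup routines.
 */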
2454 void
2455 cpu_initclocks()
2456 {
2457 (*initclock_func)();
2458 }
2460 #ifdef MULTIPROCESSOR
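/*
 * Ask ci to reschedule: post an AST to the lwp currently running
 * there, or prod a remote CPU with an IPI so it notices the flag.
 */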
2461 void
2462 need_resched(struct cpu_info *ci)
2463 {
2465 if (ci->ci_want_resched)
2466 return;
2468 ci->ci_want_resched = 1;
2469 if (ci->ci_curlwp != NULL)
2470 aston(ci->ci_curlwp->l_proc);
2471 else if (ci != curcpu())
2472 x86_send_ipi(ci, 0);
2473 }
2474 #endif
2476 /*
2477 * Allocate an IDT vector slot within the given range.
2478 * XXX needs locking to avoid MP allocation races.
2479 */
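/*
 * Illustrative use only (names and bounds are hypothetical): a
 * driver might grab a free vector and bind its stub to it,
 *
 *	vec = idt_vec_alloc(0x30, 0xef);
 *	if (vec != 0)
 *		idt_vec_set(vec, my_intr_stub);
 *
 * and release it again later with idt_vec_free(vec).
 */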
2481 int
2482 idt_vec_alloc(int low, int high)
2483 {
2484 int vec;
2486 simple_lock(&idt_lock);
2487 for (vec = low; vec <= high; vec++) {
2488 if (idt_allocmap[vec] == 0) {
2489 idt_allocmap[vec] = 1;
2490 simple_unlock(&idt_lock);
2491 return vec;
2492 }
2493 }
2494 simple_unlock(&idt_lock);
2495 return 0;
2496 }
2498 void
2499 idt_vec_set(int vec, void (*function)(void))
2500 {
2501 /*
2502 * Vector should be allocated, so no locking needed.
2503 */
2504 KASSERT(idt_allocmap[vec] == 1);
2505 setgate(&idt[vec], function, 0, SDT_SYS386IGT, SEL_KPL,
2506 GSEL(GCODE_SEL, SEL_KPL));
2507 }
2509 void
2510 idt_vec_free(int vec)
2511 {
2512 simple_lock(&idt_lock);
2513 unsetgate(&idt[vec]);
2514 idt_allocmap[vec] = 0;
2515 simple_unlock(&idt_lock);
2516 }
2518 /*
2519 * Number of processes is limited by number of available GDT slots.
2520 */
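/*
 * With USER_LDT each process apparently consumes two dynamic GDT
 * slots (its TSS plus a private LDT descriptor), hence the halving
 * below; otherwise only the TSS slot is needed.
 */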
2521 int
2522 cpu_maxproc(void)
2523 {
2524 #ifdef USER_LDT
2525 return ((MAXGDTSIZ - NGDT) / 2);
2526 #else
2527 return (MAXGDTSIZ - NGDT);
2528 #endif
2529 }
2531 #if defined(DDB) || defined(KGDB)
2533 /*
2534 * Callback to output a backtrace when entering ddb.
2535 */
2536 void
2537 ddb_trap_hook(int where)
2538 {
2539 static int once = 0;
2540 db_addr_t db_dot;
2542 if (once != 0 || where != 1)
2543 return;
2544 once = 1;
2546 if (curlwp != NULL) {
2547 db_printf("Stopped");
2548 if (curproc == NULL)
2549 db_printf("; curlwp = %p,"
2550 " curproc is NULL at\t", curlwp);
2551 else
2552 db_printf(" in pid %d.%d (%s) at\t",
2553 curproc->p_pid, curlwp->l_lid,
2554 curproc->p_comm);
2555 } else
2556 db_printf("Stopped at\t");
2557 db_dot = PC_REGS(DDB_REGS);
2558 db_print_loc_and_inst(db_dot);
2560 db_stack_trace_print((db_expr_t) db_dot, FALSE, 65535,
2561 "", db_printf);
2562 #ifdef DEBUG
2563 db_show_regs((db_expr_t) db_dot, FALSE, 65535, "");
2564 #endif
2565 }
2567 #endif /* DDB || KGDB */