ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/x86_64/kernel/process.c @ 6538:84ee014ebd41

Merge xen-vtx-unstable.hg
author:   adsharma@los-vmm.sc.intel.com
date:     Wed Aug 17 12:34:38 2005 -0800 (2005-08-17)
parents:  23979fb12c49 60a8a5ca4291
children: 99914b54f7bf
/*
 * linux/arch/x86-64/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 *
 * Jun Nakajima <jun.nakajima@intel.com>
 * Modified for Xen
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm-xen/xen-public/dom0_ops.h>
#include <asm-xen/xen-public/physdev.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/hardirq.h>
#include <asm/ia32.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static atomic_t hlt_counter = ATOMIC_INIT(0);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
        atomic_inc(&hlt_counter);
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        atomic_dec(&hlt_counter);
}

EXPORT_SYMBOL(enable_hlt);

/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
extern void stop_hz_timer(void);
extern void start_hz_timer(void);
void xen_idle(void)
{
        local_irq_disable();

        if (need_resched()) {
                local_irq_enable();
        } else {
                stop_hz_timer();
                HYPERVISOR_block(); /* implicit local_irq_enable() */
                start_hz_timer();
        }
}
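
/*
 * Editor's note (not part of the original file): interrupts are disabled
 * before the need_resched() test so that a wakeup arriving between the
 * check and the block cannot be lost -- HYPERVISOR_block() re-enables
 * event delivery and blocks as one operation, so a pending event either
 * prevents the block or wakes the domain immediately.
 */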
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        /* We shouldn't have to disable interrupts while dead, but
         * some interrupts just don't seem to go away, and this makes
         * it "work" for testing purposes. */
        /* Death loop */
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                HYPERVISOR_yield();

        local_irq_disable();
        __flush_tlb_all();
        cpu_set(smp_processor_id(), cpu_online_map);
        local_irq_enable();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
        int cpu = smp_processor_id();

        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched()) {
                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;
                        rmb();

                        if (cpu_is_offline(cpu))
                                play_dead();

                        xen_idle();
                }

                schedule();
        }
}

void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map;

        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
        } while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
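
/*
 * Editor's note (not part of the original file): cpu_idle_wait() is a
 * simple handshake with the idle loop above.  It sets cpu_idle_state for
 * every online CPU, and each idle loop clears its own flag on its next
 * pass; once every flag in 'map' has been observed clear, all CPUs are
 * known to have gone through cpu_idle() at least once since the call.
 */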
/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
/* Always use xen_idle() instead. */
void __init select_idle_routine(const struct cpuinfo_x86 *c) {}

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
        unsigned long fs, gs, shadowgs;
        unsigned int fsindex,gsindex;
        unsigned int ds,cs,es;

        printk("\n");
        print_modules();
        printk("Pid: %d, comm: %.20s %s %s\n",
               current->pid, current->comm, print_tainted(), system_utsname.release);
        printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
        printk_address(regs->rip);
        printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
        printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->rax, regs->rbx, regs->rcx);
        printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->rdx, regs->rsi, regs->rdi);
        printk("RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->rbp, regs->r8, regs->r9);
        printk("R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk("R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("mov %%ds,%0" : "=r" (ds));
        asm("mov %%cs,%0" : "=r" (cs));
        asm("mov %%es,%0" : "=r" (es));
        asm("mov %%fs,%0" : "=r" (fsindex));
        asm("mov %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs,fsindex,gs,gsindex,shadowgs);
        printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es);

}

void show_regs(struct pt_regs *regs)
{
        __show_regs(regs);
        show_trace(&regs->rsp);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        if (me->thread.io_bitmap_ptr) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
        }
}

void load_gs_index(unsigned gs)
{
        HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs);
}
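
/*
 * Editor's note (not part of the original file): a paravirtualised guest
 * runs outside ring 0 and cannot use the privileged swapgs sequence the
 * native kernel uses to reload the user %gs selector, so the load is
 * delegated to Xen through HYPERVISOR_set_segment_base() instead.
 */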
void flush_thread(void)
{
        struct task_struct *tsk = current;
        struct thread_info *t = current_thread_info();

        if (t->flags & _TIF_ABI_PENDING)
                t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                               dead_task->comm,
                               dead_task->mm->context.ldt,
                               dead_task->mm->context.size);
                        BUG();
                }
        }
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .contents = (3 << 3), /* user */
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct n_desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        desc->a = LDT_entry_a(&ud);
        desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        struct desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        return desc->base0 |
               (((u32)desc->base1) << 16) |
               (((u32)desc->base2) << 24);
}
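
/*
 * Editor's note (not part of the original file): a segment descriptor
 * stores its 32-bit base scattered across three fields -- base0 holds
 * bits 0-15, base1 bits 16-23 and base2 bits 24-31 -- so read_32bit_tls()
 * simply reassembles the address that set_32bit_tls() packed in via
 * LDT_entry_a()/LDT_entry_b().
 */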
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
        int err;
        struct pt_regs * childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;

        *childregs = *regs;

        childregs->rax = 0;
        childregs->rsp = rsp;
        if (rsp == ~0UL) {
                childregs->rsp = (unsigned long)childregs;
        }

        p->thread.rsp = (unsigned long) childregs;
        p->thread.rsp0 = (unsigned long) (childregs+1);
        p->thread.userrsp = me->thread.userrsp;

        set_ti_thread_flag(p->thread_info, TIF_FORK);

        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;

        asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
        asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
        asm("mov %%es,%0" : "=m" (p->thread.es));
        asm("mov %%ds,%0" : "=m" (p->thread.ds));

        if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = ia32_child_tls(p, childregs);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        p->thread.io_pl = current->thread.io_pl;

        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}
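
/*
 * Editor's note (not part of the original file): the child's register
 * frame is placed at the very top of its kernel stack (THREAD_SIZE above
 * p->thread_info), and childregs->rax = 0 is what makes fork()/clone()
 * return 0 in the child.  The rsp == ~0UL case appears to be the
 * kernel-thread path, where there is no user stack and the child simply
 * runs on this kernel-stack frame.
 */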
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,register) \
        HYPERVISOR_set_debugreg((register), \
                                (thread->debugreg ## register))

static inline void __save_init_fpu( struct task_struct *tsk )
{
        asm volatile( "rex64 ; fxsave %0 ; fnclex"
                      : "=m" (tsk->thread.i387.fxsave));
        tsk->thread_info->status &= ~TS_USEDFPU;
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        physdev_op_t iopl_op, iobmp_op;
        multicall_entry_t _mcl[8], *mcl = _mcl;

        /*
         * This is basically '__unlazy_fpu', except that we queue a
         * multicall to indicate FPU task switch, rather than
         * synchronously trapping to Xen.
         */
        if (prev_p->thread_info->status & TS_USEDFPU) {
                __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
                mcl->op = __HYPERVISOR_fpu_taskswitch;
                mcl->args[0] = 1;
                mcl++;
        }

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        tss->rsp0 = next->rsp0;
        mcl->op = __HYPERVISOR_stack_switch;
        mcl->args[0] = __KERNEL_DS;
        mcl->args[1] = tss->rsp0;
        mcl++;

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         * This is load_TLS(next, cpu) with multicalls.
         */
#define C(i) do { \
        if (unlikely(next->tls_array[i] != prev->tls_array[i])) { \
                mcl->op = __HYPERVISOR_update_descriptor; \
                mcl->args[0] = virt_to_machine(&get_cpu_gdt_table(cpu) \
                                               [GDT_ENTRY_TLS_MIN + i]); \
                mcl->args[1] = next->tls_array[i]; \
                mcl++; \
        } \
} while (0)
        C(0); C(1); C(2);
#undef C

        if (unlikely(prev->io_pl != next->io_pl)) {
                iopl_op.cmd = PHYSDEVOP_SET_IOPL;
                iopl_op.u.set_iopl.iopl = (next->io_pl == 0) ? 1 : next->io_pl;
                mcl->op = __HYPERVISOR_physdev_op;
                mcl->args[0] = (unsigned long)&iopl_op;
                mcl++;
        }

        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
                iobmp_op.cmd =
                        PHYSDEVOP_SET_IOBITMAP;
                iobmp_op.u.set_iobitmap.bitmap =
                        (unsigned long)next->io_bitmap_ptr;
                iobmp_op.u.set_iobitmap.nr_ports =
                        next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
                mcl->op = __HYPERVISOR_physdev_op;
                mcl->args[0] = (unsigned long)&iobmp_op;
                mcl++;
        }

        (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
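        /*
         * Editor's note (not part of the original file): everything queued
         * in _mcl[] above -- the FPU task-switch notification, the kernel
         * stack switch, any changed TLS descriptors and the IOPL/IO-bitmap
         * updates -- is submitted to Xen as a single multicall, so the
         * context switch pays for one hypervisor transition instead of one
         * per operation.
         */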
        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        if (unlikely(next->es))
                loadsegment(es, next->es);

        if (unlikely(next->ds))
                loadsegment(ds, next->ds);

        /*
         * Switch FS and GS.
         */
        if (unlikely(next->fsindex))
                loadsegment(fs, next->fsindex);

        if (next->fs)
                HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs);

        if (unlikely(next->gsindex))
                load_gs_index(next->gsindex);

        if (next->gs)
                HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs);

        /*
         * Switch the PDA context.
         */
        prev->userrsp = read_pda(oldrsp);
        write_pda(oldrsp, next->userrsp);
        write_pda(pcurrent, next_p);
        write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);

        /*
         * Now maybe reload the debug registers
         */
        if (unlikely(next->debugreg7)) {
                loaddebug(next, 0);
                loaddebug(next, 1);
                loaddebug(next, 2);
                loaddebug(next, 3);
                /* no 4 and 5 */
                loaddebug(next, 6);
                loaddebug(next, 7);
        }

        return prev_p;
}
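
/*
 * Editor's note (not part of the original file): the per-CPU PDA fields
 * rewritten above are the ones the syscall/interrupt entry code relies on:
 * 'oldrsp' caches the outgoing task's user stack pointer, 'pcurrent' names
 * the new current task, and 'kernelstack' is the stack the entry path will
 * switch to on the next kernel entry for this task.
 */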
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
                char __user * __user *envp, struct pt_regs regs)
{
        long error;
        char * filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, &regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
        putname(filename);
        return error;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32bit childs are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->rsp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
                       NULL, NULL);
}

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp,rip;
        int count = 0;

        if (!p || p == current || p->state==TASK_RUNNING)
                return 0;
        stack = (unsigned long)p->thread_info;
        if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.rsp);
        do {
                if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
                        return 0;
                rip = *(u64 *)(fp+8);
                if (!in_sched_functions(rip))
                        return rip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}
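
/*
 * Editor's note (not part of the original file): get_wchan() walks the
 * sleeping task's kernel stack by following saved frame pointers -- *fp is
 * the caller's frame pointer and *(fp + 8) its return address -- until it
 * finds a return address outside the scheduler, which it reports as the
 * task's "wait channel".  The walk is bounded to 16 frames and to the
 * task's own stack, and it assumes a kernel built with frame pointers.
 */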
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE)
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = HYPERVISOR_set_segment_base(SEGBASE_GS_USER, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE)
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                asm volatile("mov %0,%%fs" :: "r" (FS_TLS_SEL));
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                asm volatile("mov %0,%%fs" :: "r" (0));
                                ret = HYPERVISOR_set_segment_base(SEGBASE_FS, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit) {
                        rdmsrl(MSR_FS_BASE, base);
                } else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}
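
/*
 * Editor's note (not part of the original file): a minimal user-space
 * sketch of exercising this interface on Linux/x86-64, assuming the
 * ARCH_GET_FS code from <asm/prctl.h> and the generic syscall(2) wrapper;
 * the program below is illustrative only.  (ARCH_SET_FS is normally left
 * to the C library when it sets up a thread's TLS block -- repointing %fs
 * under a running glibc would break its thread-local storage.)
 *
 *     #include <asm/prctl.h>      // ARCH_GET_FS, ARCH_SET_FS, ...
 *     #include <sys/syscall.h>    // SYS_arch_prctl
 *     #include <unistd.h>
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *             unsigned long base = 0;
 *
 *             // ARCH_GET_FS stores the current FS base through 'addr'
 *             // (see the put_user() in the ARCH_GET_FS case above).
 *             if (syscall(SYS_arch_prctl, ARCH_GET_FS,
 *                         (unsigned long)&base) != 0) {
 *                     perror("arch_prctl(ARCH_GET_FS)");
 *                     return 1;
 *             }
 *             printf("FS base: %#lx\n", base);
 *             return 0;
 *     }
 */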
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs *pp, ptregs;

        pp = (struct pt_regs *)(tsk->thread.rsp0);
        --pp;

        ptregs = *pp;
        ptregs.cs &= 0xffff;
        ptregs.ss &= 0xffff;

        elf_core_copy_regs(regs, &ptregs);

        boot_option_idle_override = 1;
        return 1;
}
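
/*
 * Editor's note (not part of the original file): thread.rsp0 points at the
 * top of the task's kernel stack, so the user-mode pt_regs frame sits
 * immediately below it; that frame is what gets copied out for core dumps
 * here.  The assignment to boot_option_idle_override looks unrelated to
 * register dumping and is possibly a leftover from the unused native
 * idle_setup() path.
 */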
unsigned long arch_align_stack(unsigned long sp)
{
        if (randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}