direct-io.hg

view linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/desc.h @ 11509:2e6c10dc7c0b

[POWERPC][XEN] make sure put_domain() is called in case of allocate_rma() failure

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Tue Sep 12 10:53:46 2006 -0400 (2006-09-12)
parents 3b74edc512b4
children
line source
1 #ifndef __ARCH_DESC_H
2 #define __ARCH_DESC_H
4 #include <asm/ldt.h>
5 #include <asm/segment.h>
7 #define CPU_16BIT_STACK_SIZE 1024
9 #ifndef __ASSEMBLY__
11 #include <linux/preempt.h>
12 #include <linux/smp.h>
14 #include <asm/mmu.h>
16 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
18 DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
/*
 * Memory operand layout for the lgdt/lidt (and sgdt/sidt) instructions
 * used by the load_gdt/load_idt/store_gdt/store_idt macros below:
 * a 16-bit table limit followed immediately by the 32-bit linear base
 * address. Packed so 'address' sits at offset 2 with no padding; 'pad'
 * rounds the struct out to 8 bytes.
 */
20 struct Xgt_desc_struct {
21 unsigned short size;
22 unsigned long address __attribute__((packed));
23 unsigned short pad;
24 } __attribute__ ((packed));
26 extern struct Xgt_desc_struct idt_descr;
27 DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
/*
 * Return a pointer to @cpu's GDT, read from the 'address' field of that
 * CPU's per-CPU cpu_gdt_descr (declared above). Callers below index the
 * returned array by GDT entry number.
 */
30 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
31 {
32 return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
33 }
/*
 * Loaders for the task register and LDT register using fixed GDT entry
 * selectors (entry index * 8 forms the segment selector).
 */
35 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
36 #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
/*
 * Load the descriptor-table/segment registers from caller-supplied
 * operands: dtr points at a struct Xgt_desc_struct, tr/ldt are selectors.
 */
38 #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
39 #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
40 #define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
41 #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
/*
 * Store the current register contents back out: sgdt/sidt write a
 * struct Xgt_desc_struct, str/sldt produce the current selector.
 */
43 #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
44 #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
45 #define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
46 #define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
48 /*
49 * This is the ldt that every process will get unless we need
50 * something other than this.
51 */
52 extern struct desc_struct default_ldt[];
53 extern void set_intr_gate(unsigned int irq, void * addr);
/*
 * Write an 8-byte TSS/LDT system descriptor in place at *n:
 *   offset 0-1: limit[0:15]      offset 2-3: base[0:15]
 *   offset 4:   base[16:23]      offset 5:   type/access byte
 *   offset 6:   0 (limit[16:19] and flags cleared)
 *   offset 7:   base[24:31]
 * The rorl pair rotates 'addr' to expose its high half for the byte
 * stores at offsets 4 and 7, then rotates it back.
 */
55 #define _set_tssldt_desc(n,addr,limit,type) \
56 __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
57 "movw %w1,2(%2)\n\t" \
58 "rorl $16,%1\n\t" \
59 "movb %b1,4(%2)\n\t" \
60 "movb %4,5(%2)\n\t" \
61 "movb $0,6(%2)\n\t" \
62 "movb %h1,7(%2)\n\t" \
63 "rorl $16,%1" \
64 : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
66 #ifndef CONFIG_X86_NO_TSS
/*
 * Install a TSS descriptor for @addr in @cpu's GDT at @entry.
 * The limit covers the tss_struct up to (not including) its
 * __cacheline_filler member; 0x89 is the access byte (present,
 * 32-bit available TSS).
 */
67 static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
68 {
69 _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
70 offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
71 }
/* Convenience wrapper: the TSS always lives at GDT_ENTRY_TSS. */
73 #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
74 #endif
/*
 * Install an LDT descriptor in @cpu's GDT at GDT_ENTRY_LDT.
 * @size is the entry count; each LDT entry is 8 bytes, hence the
 * (size << 3) - 1 byte limit. 0x82 is the access byte (present LDT).
 */
76 static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
77 {
78 _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
79 }
/*
 * Low 32 bits of a segment descriptor built from a user_desc-style
 * 'info': base[0:15] in the high word, limit[0:15] in the low word.
 */
81 #define LDT_entry_a(info) \
82 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
/*
 * High 32 bits: base[24:31] and base[16:23] in their descriptor slots,
 * limit[16:19], and the flag bits assembled from the bitfields.
 * Note read_exec_only and seg_not_present are stored inverted (the
 * hardware bits are "writable" and "present"). 0x7000 sets DPL=3 and
 * the type's data/code base bits.
 */
84 #define LDT_entry_b(info) \
85 (((info)->base_addr & 0xff000000) | \
86 (((info)->base_addr & 0x00ff0000) >> 16) | \
87 ((info)->limit & 0xf0000) | \
88 (((info)->read_exec_only ^ 1) << 9) | \
89 ((info)->contents << 10) | \
90 (((info)->seg_not_present ^ 1) << 15) | \
91 ((info)->seg_32bit << 22) | \
92 ((info)->limit_in_pages << 23) | \
93 ((info)->useable << 20) | \
94 0x7000)
/*
 * True when 'info' describes the canonical empty entry: all-zero fields
 * except the two inverted flags above, which must be 1.
 */
96 #define LDT_empty(info) (\
97 (info)->base_addr == 0 && \
98 (info)->limit == 0 && \
99 (info)->contents == 0 && \
100 (info)->read_exec_only == 1 && \
101 (info)->seg_32bit == 0 && \
102 (info)->limit_in_pages == 0 && \
103 (info)->seg_not_present == 1 && \
104 (info)->useable == 0 )
106 extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
/* load_TLS below hard-codes exactly three 8-byte TLS entries (3*8 = 24). */
108 #if TLS_SIZE != 24
109 # error update this code.
110 #endif
/*
 * Copy the three TLS descriptors from thread state 't' into @cpu's GDT.
 * Under Xen the GDT cannot be written directly, so each entry goes
 * through HYPERVISOR_update_descriptor with the machine address of the
 * target GDT slot and the raw 64-bit descriptor value.
 */
112 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
113 {
114 #define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
115 C(0); C(1); C(2);
116 #undef C
117 }
/*
 * Drop the current CPU's LDT by asking Xen for a NULL LDT of size 0.
 * get_cpu()/put_cpu() bracket the call to keep us on one CPU while the
 * per-CPU state is touched.
 */
119 static inline void clear_LDT(void)
120 {
121 int cpu = get_cpu();
123 /*
124 * NB. We load the default_ldt for lcall7/27 handling on demand, as
125 * it slows down context switching. No one uses it anyway.
126 */
127 cpu = cpu; /* XXX avoid compiler warning */
128 xen_set_ldt(0UL, 0);
129 put_cpu();
130 }
132 /*
133 * load one particular LDT into the current CPU
134 */
/*
 * Point the current CPU at the LDT described by mm context 'pc'
 * (pc->ldt entries, pc->size of them) via xen_set_ldt. An empty LDT
 * (size 0) is passed as a NULL base. Caller is responsible for CPU
 * pinning — see load_LDT below; 'cpu' is currently unused here.
 */
135 static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
136 {
137 void *segments = pc->ldt;
138 int count = pc->size;
140 if (likely(!count))
141 segments = NULL;
143 xen_set_ldt((unsigned long)segments, count);
144 }
/*
 * Preemption-safe wrapper around load_LDT_nolock: get_cpu() disables
 * preemption and names the current CPU, put_cpu() re-enables it.
 */
146 static inline void load_LDT(mm_context_t *pc)
147 {
148 int cpu = get_cpu();
149 load_LDT_nolock(pc, cpu);
150 put_cpu();
151 }
/*
 * Reassemble the 32-bit segment base address scattered across a
 * descriptor: desc[0] bits 16-31 hold base[0:15]; desc[1] carries
 * base[16:23] in its low byte and base[24:31] in its top byte.
 */
static inline unsigned long get_desc_base(unsigned long *desc)
{
	unsigned long low = (desc[0] >> 16) & 0x0000ffff;
	unsigned long mid = (desc[1] << 16) & 0x00ff0000;
	unsigned long high = desc[1] & 0xff000000;

	return low | mid | high;
}
162 #endif /* !__ASSEMBLY__ */
164 #endif