direct-io.hg

view patches/linux-2.6.16.29/blktap-aio-16_03_06.patch @ 11750:d845c9522d9e

[HVM][SVM] Check if SVM is disabled by the BIOS before enabling it.

Newer BIOS implementations will be able to disable the SVM feature,
although an additional test of an MSR (VMCR 0xC0010114 bit 4) is
necessary (set equals disabled). Bit 4 of MSR 0xc0010114 returns 0
(SVM enabled) on machines with older BIOS' without the SVM disable
feature support.

Signed-off-by: Wei Huang <wei.huang2@amd.com>
Signed-off-by: Tom Woller <thomas.woller@amd.com>
author kfraser@localhost.localdomain
date Thu Oct 12 16:12:10 2006 +0100 (2006-10-12)
parents 041be3f6b38e
children
line source
1 diff -pruN ../orig-linux-2.6.16.29/fs/aio.c ./fs/aio.c
2 --- ../orig-linux-2.6.16.29/fs/aio.c 2006-09-12 19:02:10.000000000 +0100
3 +++ ./fs/aio.c 2006-09-19 13:58:49.000000000 +0100
4 @@ -34,6 +34,11 @@
5 #include <asm/uaccess.h>
6 #include <asm/mmu_context.h>
8 +#ifdef CONFIG_EPOLL
9 +#include <linux/poll.h>
10 +#include <linux/eventpoll.h>
11 +#endif
12 +
13 #if DEBUG > 1
14 #define dprintk printk
15 #else
16 @@ -1016,6 +1021,10 @@ put_rq:
17 if (waitqueue_active(&ctx->wait))
18 wake_up(&ctx->wait);
20 +#ifdef CONFIG_EPOLL
21 + if (ctx->file && waitqueue_active(&ctx->poll_wait))
22 + wake_up(&ctx->poll_wait);
23 +#endif
24 if (ret)
25 put_ioctx(ctx);
27 @@ -1025,6 +1034,8 @@ put_rq:
28 /* aio_read_evt
29 * Pull an event off of the ioctx's event ring. Returns the number of
30 * events fetched (0 or 1 ;-)
31 + * If ent parameter is 0, just returns the number of events that would
32 + * be fetched.
33 * FIXME: make this use cmpxchg.
34 * TODO: make the ringbuffer user mmap()able (requires FIXME).
35 */
36 @@ -1047,13 +1058,18 @@ static int aio_read_evt(struct kioctx *i
38 head = ring->head % info->nr;
39 if (head != ring->tail) {
40 - struct io_event *evp = aio_ring_event(info, head, KM_USER1);
41 - *ent = *evp;
42 - head = (head + 1) % info->nr;
43 - smp_mb(); /* finish reading the event before updatng the head */
44 - ring->head = head;
45 - ret = 1;
46 - put_aio_ring_event(evp, KM_USER1);
47 + if (ent) { /* event requested */
48 + struct io_event *evp =
49 + aio_ring_event(info, head, KM_USER1);
50 + *ent = *evp;
51 + head = (head + 1) % info->nr;
52 + /* finish reading the event before updating the head */
53 + smp_mb();
54 + ring->head = head;
55 + ret = 1;
56 + put_aio_ring_event(evp, KM_USER1);
57 + } else /* only need to know availability */
58 + ret = 1;
59 }
60 spin_unlock(&info->ring_lock);
62 @@ -1236,9 +1252,78 @@ static void io_destroy(struct kioctx *io
64 aio_cancel_all(ioctx);
65 wait_for_all_aios(ioctx);
66 +#ifdef CONFIG_EPOLL
67 + /* forget the poll file, but it's up to the user to close it */
68 + if (ioctx->file) {
69 + ioctx->file->private_data = 0;
70 + ioctx->file = 0;
71 + }
72 +#endif
73 put_ioctx(ioctx); /* once for the lookup */
74 }
76 +#ifdef CONFIG_EPOLL
77 +
78 +static int aio_queue_fd_close(struct inode *inode, struct file *file)
79 +{
80 + struct kioctx *ioctx = file->private_data;
81 + if (ioctx) {
82 + file->private_data = 0;
83 + spin_lock_irq(&ioctx->ctx_lock);
84 + ioctx->file = 0;
85 + spin_unlock_irq(&ioctx->ctx_lock);
86 + }
87 + return 0;
88 +}
89 +
90 +static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
91 +{ unsigned int pollflags = 0;
92 + struct kioctx *ioctx = file->private_data;
93 +
94 + if (ioctx) {
95 +
96 + spin_lock_irq(&ioctx->ctx_lock);
97 + /* Insert inside our poll wait queue */
98 + poll_wait(file, &ioctx->poll_wait, wait);
99 +
100 + /* Check our condition */
101 + if (aio_read_evt(ioctx, 0))
102 + pollflags = POLLIN | POLLRDNORM;
103 + spin_unlock_irq(&ioctx->ctx_lock);
104 + }
105 +
106 + return pollflags;
107 +}
108 +
109 +static struct file_operations aioq_fops = {
110 + .release = aio_queue_fd_close,
111 + .poll = aio_queue_fd_poll
112 +};
113 +
114 +/* make_aio_fd:
115 + * Create a file descriptor that can be used to poll the event queue.
116 + * Based and piggybacked on the excellent epoll code.
117 + */
118 +
119 +static int make_aio_fd(struct kioctx *ioctx)
120 +{
121 + int error, fd;
122 + struct inode *inode;
123 + struct file *file;
124 +
125 + error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
126 + if (error)
127 + return error;
128 +
129 + /* associate the file with the IO context */
130 + file->private_data = ioctx;
131 + ioctx->file = file;
132 + init_waitqueue_head(&ioctx->poll_wait);
133 + return fd;
134 +}
135 +#endif
136 +
137 +
138 /* sys_io_setup:
139 * Create an aio_context capable of receiving at least nr_events.
140 * ctxp must not point to an aio_context that already exists, and
141 @@ -1251,18 +1336,30 @@ static void io_destroy(struct kioctx *io
142 * resources are available. May fail with -EFAULT if an invalid
143 * pointer is passed for ctxp. Will fail with -ENOSYS if not
144 * implemented.
145 + *
146 + * To request a selectable fd, the user context has to be initialized
147 + * to 1, instead of 0, and the return value is the fd.
148 + * This keeps the system call compatible, since a non-zero value
149 + * was not allowed so far.
150 */
151 asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
152 {
153 struct kioctx *ioctx = NULL;
154 unsigned long ctx;
155 long ret;
156 + int make_fd = 0;
158 ret = get_user(ctx, ctxp);
159 if (unlikely(ret))
160 goto out;
162 ret = -EINVAL;
163 +#ifdef CONFIG_EPOLL
164 + if (ctx == 1) {
165 + make_fd = 1;
166 + ctx = 0;
167 + }
168 +#endif
169 if (unlikely(ctx || nr_events == 0)) {
170 pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
171 ctx, nr_events);
172 @@ -1273,8 +1370,12 @@ asmlinkage long sys_io_setup(unsigned nr
173 ret = PTR_ERR(ioctx);
174 if (!IS_ERR(ioctx)) {
175 ret = put_user(ioctx->user_id, ctxp);
176 - if (!ret)
177 - return 0;
178 +#ifdef CONFIG_EPOLL
179 + if (make_fd && ret >= 0)
180 + ret = make_aio_fd(ioctx);
181 +#endif
182 + if (ret >= 0)
183 + return ret;
185 get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
186 io_destroy(ioctx);
187 diff -pruN ../orig-linux-2.6.16.29/fs/eventpoll.c ./fs/eventpoll.c
188 --- ../orig-linux-2.6.16.29/fs/eventpoll.c 2006-09-12 19:02:10.000000000 +0100
189 +++ ./fs/eventpoll.c 2006-09-19 13:58:49.000000000 +0100
190 @@ -235,8 +235,6 @@ struct ep_pqueue {
192 static void ep_poll_safewake_init(struct poll_safewake *psw);
193 static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
194 -static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
195 - struct eventpoll *ep);
196 static int ep_alloc(struct eventpoll **pep);
197 static void ep_free(struct eventpoll *ep);
198 static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
199 @@ -266,7 +264,7 @@ static int ep_events_transfer(struct eve
200 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
201 int maxevents, long timeout);
202 static int eventpollfs_delete_dentry(struct dentry *dentry);
203 -static struct inode *ep_eventpoll_inode(void);
204 +static struct inode *ep_eventpoll_inode(struct file_operations *fops);
205 static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
206 int flags, const char *dev_name,
207 void *data);
208 @@ -525,7 +523,7 @@ asmlinkage long sys_epoll_create(int siz
209 * Creates all the items needed to setup an eventpoll file. That is,
210 * a file structure, and inode and a free file descriptor.
211 */
212 - error = ep_getfd(&fd, &inode, &file, ep);
213 + error = ep_getfd(&fd, &inode, &file, ep, &eventpoll_fops);
214 if (error)
215 goto eexit_2;
217 @@ -710,8 +708,8 @@ eexit_1:
218 /*
219 * Creates the file descriptor to be used by the epoll interface.
220 */
221 -static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
222 - struct eventpoll *ep)
223 +int ep_getfd(int *efd, struct inode **einode, struct file **efile,
224 + struct eventpoll *ep, struct file_operations *fops)
225 {
226 struct qstr this;
227 char name[32];
228 @@ -727,7 +725,7 @@ static int ep_getfd(int *efd, struct ino
229 goto eexit_1;
231 /* Allocates an inode from the eventpoll file system */
232 - inode = ep_eventpoll_inode();
233 + inode = ep_eventpoll_inode(fops);
234 error = PTR_ERR(inode);
235 if (IS_ERR(inode))
236 goto eexit_2;
237 @@ -758,7 +756,7 @@ static int ep_getfd(int *efd, struct ino
239 file->f_pos = 0;
240 file->f_flags = O_RDONLY;
241 - file->f_op = &eventpoll_fops;
242 + file->f_op = fops;
243 file->f_mode = FMODE_READ;
244 file->f_version = 0;
245 file->private_data = ep;
246 @@ -1574,7 +1572,7 @@ static int eventpollfs_delete_dentry(str
247 }
250 -static struct inode *ep_eventpoll_inode(void)
251 +static struct inode *ep_eventpoll_inode(struct file_operations *fops)
252 {
253 int error = -ENOMEM;
254 struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
255 @@ -1582,7 +1580,7 @@ static struct inode *ep_eventpoll_inode(
256 if (!inode)
257 goto eexit_1;
259 - inode->i_fop = &eventpoll_fops;
260 + inode->i_fop = fops;
262 /*
263 * Mark the inode dirty from the very beginning,
264 diff -pruN ../orig-linux-2.6.16.29/include/linux/aio.h ./include/linux/aio.h
265 --- ../orig-linux-2.6.16.29/include/linux/aio.h 2006-09-12 19:02:10.000000000 +0100
266 +++ ./include/linux/aio.h 2006-09-19 13:58:49.000000000 +0100
267 @@ -191,6 +191,11 @@ struct kioctx {
268 struct aio_ring_info ring_info;
270 struct work_struct wq;
271 +#ifdef CONFIG_EPOLL
272 + // poll integration
273 + wait_queue_head_t poll_wait;
274 + struct file *file;
275 +#endif
276 };
278 /* prototypes */
279 diff -pruN ../orig-linux-2.6.16.29/include/linux/eventpoll.h ./include/linux/eventpoll.h
280 --- ../orig-linux-2.6.16.29/include/linux/eventpoll.h 2006-09-12 19:02:10.000000000 +0100
281 +++ ./include/linux/eventpoll.h 2006-09-19 13:58:49.000000000 +0100
282 @@ -86,6 +86,12 @@ static inline void eventpoll_release(str
283 }
286 +/*
287 + * called by aio code to create fd that can poll the aio event queue
288 + */
289 +struct eventpoll;
290 +int ep_getfd(int *efd, struct inode **einode, struct file **efile,
291 + struct eventpoll *ep, struct file_operations *fops);
292 #else
294 static inline void eventpoll_init_file(struct file *file) {}