ia64/linux-2.6.18-xen.hg

view drivers/xen/blktap2/blktap.h @ 896:f59c5daed527

blktap2: use blk_rq_map_sg() here too

Just like in blkfront, not doing so can cause the maximum number of
segments check to trigger.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 04 10:46:54 2009 +0100 (2009-06-04)
parents eba6fe6d8d53
children
line source
1 #ifndef _BLKTAP_H_
2 #define _BLKTAP_H_
4 #include <linux/fs.h>
5 #include <linux/poll.h>
6 #include <linux/cdev.h>
7 #include <linux/scatterlist.h>
8 #include <xen/blkif.h>
9 #include <xen/gnttab.h>
//#define ENABLE_PASSTHROUGH

/* Runtime-tunable verbosity threshold for the BT* macros below. */
extern int blktap_debug_level;

/*
 * Conditional, rate-limited kernel logging: print only when the global
 * debug level exceeds 'level'; a non-zero 'force' bypasses
 * printk_ratelimit().  Function name is prepended automatically.
 */
#define BTPRINTK(level, tag, force, _f, _a...)				\
	do {								\
		if (blktap_debug_level > level &&			\
		    (force || printk_ratelimit()))			\
			printk(tag "%s: " _f, __func__, ##_a);		\
	} while (0)

/* Convenience wrappers; only BTDBG forces output past the ratelimiter. */
#define BTDBG(_f, _a...)             BTPRINTK(8, KERN_DEBUG, 1, _f, ##_a)
#define BTINFO(_f, _a...)            BTPRINTK(0, KERN_INFO, 0, _f, ##_a)
#define BTWARN(_f, _a...)            BTPRINTK(0, KERN_WARNING, 0, _f, ##_a)
#define BTERR(_f, _a...)             BTPRINTK(0, KERN_ERR, 0, _f, ##_a)
/* Upper bound on the number of tap minors (see blktaps[] below). */
#define MAX_BLKTAP_DEVICE            256

/* Bit numbers for the blktap->dev_inuse lifecycle/state word. */
#define BLKTAP_CONTROL               1
#define BLKTAP_RING_FD               2
#define BLKTAP_RING_VMA              3	/* set while userspace has the ring mmap()ed */
#define BLKTAP_DEVICE                4
#define BLKTAP_SYSFS                 5
#define BLKTAP_PAUSE_REQUESTED       6
#define BLKTAP_PAUSED                7
#define BLKTAP_SHUTDOWN_REQUESTED    8
#define BLKTAP_PASSTHROUGH           9
#define BLKTAP_DEFERRED              10
/* blktap IOCTLs: */
#define BLKTAP2_IOCTL_KICK_FE        1
#define BLKTAP2_IOCTL_ALLOC_TAP      200
#define BLKTAP2_IOCTL_FREE_TAP       201
#define BLKTAP2_IOCTL_CREATE_DEVICE  202
#define BLKTAP2_IOCTL_SET_PARAMS     203
#define BLKTAP2_IOCTL_PAUSE          204
#define BLKTAP2_IOCTL_REOPEN         205
#define BLKTAP2_IOCTL_RESUME         206

/* Size of blktap_params.name, including the terminating NUL. */
#define BLKTAP2_MAX_MESSAGE_LEN      256

/* Messages delivered to the userspace ring consumer. */
#define BLKTAP2_RING_MESSAGE_PAUSE   1
#define BLKTAP2_RING_MESSAGE_RESUME  2
#define BLKTAP2_RING_MESSAGE_CLOSE   3

/* blktap_request.status values. */
#define BLKTAP_REQUEST_FREE          0
#define BLKTAP_REQUEST_PENDING       1
/*
 * The maximum number of requests that can be outstanding at any time
 * is determined by
 *
 *   [mmap_alloc * MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST]
 *
 * where mmap_alloc < MAX_DYNAMIC_MEM.
 *
 * TODO:
 * mmap_alloc is initialised to 2 and should be adjustable on the fly via
 * sysfs.
 */
#define BLK_RING_SIZE		__RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
#define MAX_DYNAMIC_MEM		BLK_RING_SIZE
#define MAX_PENDING_REQS	BLK_RING_SIZE
/* Total data pages needed for one ring's worth of fully-segmented requests. */
#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
/*
 * Virtual address of segment _seg of request slot _req within the mapped
 * area starting at _start (one page per segment, contiguous per request).
 */
#define MMAP_VADDR(_start, _req, _seg)					\
	(_start +							\
	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
	 ((_seg) * PAGE_SIZE))
/*
 * Reference counting for struct blktap.  blktap_put() wakes waiters on
 * tap->wq when the last reference is dropped, letting teardown proceed.
 * Note: _b is evaluated more than once in blktap_put().
 */
#define blktap_get(_b) (atomic_inc(&(_b)->refcnt))
#define blktap_put(_b)					\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			wake_up(&(_b)->wq);		\
	} while (0)
struct blktap;

/*
 * Grant handles for one request segment, held separately for the kernel
 * and the user mapping of the same granted page.
 */
struct grant_handle_pair {
	grant_handle_t kernel;
	grant_handle_t user;
};
#define INVALID_GRANT_HANDLE	0xFFFF

/* Identifies one tap instance to userspace -- presumably the char-ring,
 * block-device and tap minor numbers; confirm against the control code. */
struct blktap_handle {
	unsigned int ring;
	unsigned int device;
	unsigned int minor;
};
/* User-supplied virtual disk parameters (BLKTAP2_IOCTL_SET_PARAMS). */
struct blktap_params {
	char name[BLKTAP2_MAX_MESSAGE_LEN];	/* device name; NUL-terminated
						 * by blktap_validate_params() */
	unsigned long long capacity;		/* disk size -- presumably in
						 * sectors; TODO confirm unit */
	unsigned long sector_size;		/* bytes per sector */
};
/* The block-device half of a tap: the gendisk exposed to the kernel. */
struct blktap_device {
	int users;			/* open count */
	spinlock_t lock;
	struct gendisk *gd;

#ifdef ENABLE_PASSTHROUGH
	struct block_device *bdev;	/* backing device in passthrough mode */
#endif
};
/* The userspace-facing half of a tap: shared ring and char-device state. */
struct blktap_ring {
	struct vm_area_struct *vma;	/* userspace mapping of the ring */
	blkif_front_ring_t ring;	/* front half of the blkif ring */
	struct vm_foreign_map foreign_map;
	unsigned long ring_vstart;	/* start of ring pages in the vma */
	unsigned long user_vstart;	/* start of data pages in the vma */

	int response;

	wait_queue_head_t poll_wait;	/* woken to notify userspace (poll) */

	dev_t devno;
	struct class_device *dev;
	atomic_t sysfs_refcnt;
	struct mutex sysfs_mutex;
};
/* Per-tap I/O statistics (st_rd_* = reads, st_wr_* = writes). */
struct blktap_statistics {
	unsigned long st_print;		/* presumably last-print timestamp;
					 * confirm against the stats code */
	int st_rd_req;			/* read requests */
	int st_wr_req;			/* write requests */
	int st_oo_req;			/* "out of" (deferred) requests --
					 * TODO confirm */
	int st_rd_sect;			/* sectors read */
	int st_wr_sect;			/* sectors written */
	s64 st_rd_cnt;			/* completed reads */
	s64 st_rd_sum_usecs;		/* cumulative read latency */
	s64 st_rd_max_usecs;		/* worst-case read latency */
	s64 st_wr_cnt;			/* completed writes */
	s64 st_wr_sum_usecs;		/* cumulative write latency */
	s64 st_wr_max_usecs;		/* worst-case write latency */
};
/* One in-flight request, owned by a slot of blktap->pending_requests. */
struct blktap_request {
	uint64_t id;			/* caller id, echoed in the response */
	uint16_t usr_idx;		/* slot index -- presumably into
					 * pending_requests[]; confirm */

	uint8_t status;			/* BLKTAP_REQUEST_FREE / _PENDING */
	atomic_t pendcnt;
	uint8_t nr_pages;		/* segments/pages actually in use */
	unsigned short operation;

	struct timeval time;		/* submission time (latency stats) */
	struct grant_handle_pair handles[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct list_head free_list;	/* linkage in the free-request pool */
};
/* One tap instance: ties together ring, block device and request state. */
struct blktap {
	int minor;
	pid_t pid;			/* presumably the userspace daemon's
					 * pid; confirm against ring code */
	atomic_t refcnt;		/* see blktap_get()/blktap_put() */
	unsigned long dev_inuse;	/* BLKTAP_* state bits */

	struct blktap_params params;	/* current disk geometry */

	struct rw_semaphore tap_sem;

	struct blktap_ring ring;	/* userspace half */
	struct blktap_device device;	/* kernel block-device half */

	int pending_cnt;
	struct blktap_request *pending_requests[MAX_PENDING_REQS];
	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];

	wait_queue_head_t wq;		/* woken when refcnt hits zero */
	struct list_head deferred_queue;

	struct blktap_statistics stats;
};
186 extern struct blktap *blktaps[MAX_BLKTAP_DEVICE];
188 static inline int
189 blktap_active(struct blktap *tap)
190 {
191 return test_bit(BLKTAP_RING_VMA, &tap->dev_inuse);
192 }
194 static inline int
195 blktap_validate_params(struct blktap *tap, struct blktap_params *params)
196 {
197 /* TODO: sanity check */
198 params->name[sizeof(params->name) - 1] = '\0';
199 BTINFO("%s: capacity: %llu, sector-size: %lu\n",
200 params->name, params->capacity, params->sector_size);
201 return 0;
202 }
/* Control plane. */
int blktap_control_destroy_device(struct blktap *);

/* Userspace ring (char device) lifecycle and notification. */
int blktap_ring_init(int *);
int blktap_ring_free(void);
int blktap_ring_create(struct blktap *);
int blktap_ring_destroy(struct blktap *);
int blktap_ring_pause(struct blktap *);
int blktap_ring_resume(struct blktap *);
void blktap_ring_kick_user(struct blktap *);

/* Sysfs nodes. */
int blktap_sysfs_init(void);
void blktap_sysfs_free(void);
int blktap_sysfs_create(struct blktap *);
int blktap_sysfs_destroy(struct blktap *);

/* Kernel block device lifecycle and request completion. */
int blktap_device_init(int *);
void blktap_device_free(void);
int blktap_device_create(struct blktap *);
int blktap_device_destroy(struct blktap *);
int blktap_device_pause(struct blktap *);
int blktap_device_resume(struct blktap *);
void blktap_device_restart(struct blktap *);
void blktap_device_finish_request(struct blktap *,
				  blkif_response_t *,
				  struct blktap_request *);
void blktap_device_fail_pending_requests(struct blktap *);
#ifdef ENABLE_PASSTHROUGH
int blktap_device_enable_passthrough(struct blktap *,
				     unsigned, unsigned);
#endif

/* Deferred-work queue. */
void blktap_defer(struct blktap *);
void blktap_run_deferred(void);

/* Pre-allocated request pool. */
int blktap_request_pool_init(void);
void blktap_request_pool_free(void);
int blktap_request_pool_grow(void);
int blktap_request_pool_shrink(void);
struct blktap_request *blktap_request_allocate(struct blktap *);
void blktap_request_free(struct blktap *, struct blktap_request *);
unsigned long request_to_kaddr(struct blktap_request *, int);
246 #endif