obj-$(CONFIG_BLK_DEV_SR) += cdrom.o
obj-$(CONFIG_PARIDE_PCD) += cdrom.o
obj-$(CONFIG_CDROM_PKTCDVD) += cdrom.o
-obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += cdrom.o
obj-$(CONFIG_VIOCD) += viocd.o cdrom.o
obj-$(CONFIG_GDROM) += gdrom.o cdrom.o
/* VM /proc information for memory */
extern unsigned long totalram_pages;
-#if !defined(MODULE) && defined(CONFIG_HIGHMEM)
+#ifndef MODULE
extern unsigned long totalhigh_pages;
#define inc_totalhigh_pages() (totalhigh_pages++)
#define dec_totalhigh_pages() (totalhigh_pages--)
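/*
 * A minimal sketch of the complementary branch this hunk implies (an
 * assumption -- the #else arm is outside the hunk): without highmem
 * accounting the helpers presumably compile away, so callers can use
 * them unconditionally:
 *
 *	#else
 *	#define inc_totalhigh_pages() ((void)0)
 *	#define dec_totalhigh_pages() ((void)0)
 *	#endif
 */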
static LIST_HEAD(ballooned_pages);
/* Main work function, always executed in process context. */
-static void balloon_process(struct work_struct *unused);
-static DECLARE_WORK(balloon_worker, balloon_process);
+static void balloon_process(void *unused);
+static DECLARE_WORK(balloon_worker, balloon_process, NULL);
static struct timer_list balloon_timer;
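/*
 * The DECLARE_WORK() change above reverts to the older (pre-2.6.20)
 * workqueue API, where the handler receives the void * supplied at
 * declaration time instead of a struct work_struct * recovered with
 * container_of(). A minimal sketch of that calling pattern:
 *
 *	static void my_handler(void *data);
 *	static DECLARE_WORK(my_work, my_handler, NULL);
 *	...
 *	schedule_work(&my_work);  -- runs my_handler(NULL) in process context
 */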
/* When ballooning out (allocating memory to return to Xen) we don't really
return target;
}
-unsigned long balloon_minimum_target(void)
+static unsigned long minimum_target(void)
{
#ifndef CONFIG_XEN
#define max_pfn num_physpages
* by the balloon lock), or with changes to the Xen hard limit, but we will
* recover from these in time.
*/
-static void balloon_process(struct work_struct *unused)
+static void balloon_process(void *unused)
{
int need_sleep = 0;
long credit;
{
/* No need for lock. Not read-modify-write updates. */
bs.hard_limit = ~0UL;
- bs.target_pages = max(target, balloon_minimum_target());
+ bs.target_pages = max(target, minimum_target());
schedule_work(&balloon_worker);
}
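/*
 * Sketch of the expected caller (an assumption pieced together from
 * the store_target_kb() hunk below): a write to the sysfs target node
 * is parsed as a size and handed to balloon_set_new_target() as a
 * page count, which clamps it and kicks balloon_worker:
 *
 *	target_bytes = memparse(memstring, &endchar);
 *	balloon_set_new_target(target_bytes >> PAGE_SHIFT);
 */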
page,
"Current allocation: %8lu kB\n"
"Requested target: %8lu kB\n"
- "Minimum target: %8lu kB\n"
- "Maximum target: %8lu kB\n"
"Low-mem balloon: %8lu kB\n"
"High-mem balloon: %8lu kB\n"
"Driver pages: %8lu kB\n"
"Xen hard limit: ",
PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages),
- PAGES2KB(balloon_minimum_target()), PAGES2KB(num_physpages),
PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high),
PAGES2KB(bs.driver_pages));
goto out;
}
-static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec)
+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
{
unsigned long flags;
int i;
}
balloon_unlock(flags);
- if (free_vec)
- kfree(pagevec);
- else
- totalram_pages = bs.current_pages -= nr_pages;
+ kfree(pagevec);
schedule_work(&balloon_worker);
}
-void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
-{
- _free_empty_pages_and_pagevec(pagevec, nr_pages, 1);
-}
-
-void free_empty_pages(struct page **pagevec, int nr_pages)
-{
- _free_empty_pages_and_pagevec(pagevec, nr_pages, 0);
-}
-
void balloon_release_driver_page(struct page *page)
{
unsigned long flags;
unsigned long balloon_high;
};
-extern unsigned long num_physpages;
extern struct balloon_stats balloon_stats;
#define bs balloon_stats
void balloon_sysfs_exit(void);
void balloon_set_new_target(unsigned long target);
-unsigned long balloon_minimum_target(void);
#endif /* __XEN_BALLOON_COMMON_H__ */
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sysdev.h>
-#include <linux/module.h>
#include "common.h"
#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#define BALLOON_SHOW(name, format, args...) \
static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
char *buf) \
{ \
return sprintf(buf, format, ##args); \
static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
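/*
 * For reference, the first use below -- BALLOON_SHOW(current_kb, ...)
 * -- now expands to roughly:
 *
 *	static ssize_t show_current_kb(struct sys_device *dev, char *buf)
 *	{
 *		return sprintf(buf, "%lu\n", PAGES2KB(bs.current_pages));
 *	}
 *	static SYSDEV_ATTR(current_kb, S_IRUGO, show_current_kb, NULL);
 */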
BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages));
-BALLOON_SHOW(min_kb, "%lu\n", PAGES2KB(balloon_minimum_target()));
-BALLOON_SHOW(max_kb, "%lu\n", PAGES2KB(num_physpages));
BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low));
BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high));
BALLOON_SHOW(hard_limit_kb,
(bs.hard_limit!=~0UL) ? PAGES2KB(bs.hard_limit) : 0);
BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages));
-static ssize_t show_target_kb(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_target_kb(struct sys_device *dev, char *buf)
{
return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages));
}
static ssize_t store_target_kb(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf, size_t count)
+ const char *buf,
+ size_t count)
{
char memstring[64], *endchar;
unsigned long long target_bytes;
static struct attribute *balloon_info_attrs[] = {
&attr_current_kb.attr,
- &attr_min_kb.attr,
- &attr_max_kb.attr,
&attr_low_kb.attr,
&attr_high_kb.attr,
&attr_hard_limit_kb.attr,
};
static struct sysdev_class balloon_sysdev_class = {
- .name = BALLOON_CLASS_NAME,
+ set_kset_name(BALLOON_CLASS_NAME),
};
static struct sys_device balloon_sysdev;
obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o
-blkbk-y := blkback.o xenbus.o interface.o vbd.o cdrom.o
+blkbk-y := blkback.o xenbus.o interface.o vbd.o
#include <linux/spinlock.h>
#include <linux/kthread.h>
-#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <xen/balloon.h>
return;
if (blkif->plug->unplug_fn)
blkif->plug->unplug_fn(blkif->plug);
- kobject_put(&blkif->plug->kobj);
+ blk_put_queue(blkif->plug);
blkif->plug = NULL;
}
static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
+ request_queue_t *q = bdev_get_queue(bdev);
if (q == blkif->plug)
return;
unplug_queue(blkif);
- WARN_ON(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags));
- kobject_get(&q->kobj);
+ blk_get_queue(q);
blkif->plug = q;
}
static void print_stats(blkif_t *blkif)
{
- printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d | pk %4d\n",
+ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
current->comm, blkif->st_oo_req,
- blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req,
- blkif->st_pk_req);
+ blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
blkif->st_rd_req = 0;
blkif->st_wr_req = 0;
blkif->st_oo_req = 0;
- blkif->st_pk_req = 0;
}
int blkif_schedule(void *arg)
}
}
-static void end_block_io_op(struct bio *bio, int error)
+static int end_block_io_op(struct bio *bio, unsigned int done, int error)
{
+ if (bio->bi_size != 0)
+ return 1;
__end_block_io_op(bio->bi_private, error);
bio_put(bio);
+ return error;
}
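/*
 * Note on the restored signature: under the older bi_end_io convention
 * the completion handler may be invoked for partial progress, and
 * bio->bi_size only reaches zero once the whole bio has completed, so
 * the early "return 1" defers __end_block_io_op() to the final call.
 */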
wake_up(&blkif->wq);
}
-irqreturn_t blkif_be_int(int irq, void *dev_id)
+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
blkif_notify_work(dev_id);
return IRQ_HANDLED;
blkif->st_wr_req++;
dispatch_rw_block_io(blkif, &req, pending_req);
break;
- case BLKIF_OP_PACKET:
- DPRINTK("error: block operation BLKIF_OP_PACKET not implemented\n");
- blkif->st_pk_req++;
- make_response(blkif, req.id, req.operation,
- BLKIF_RSP_ERROR);
- free_req(pending_req);
- break;
default:
/* A good sign something is wrong: sleep for a while to
* avoid excessive CPU consumption by a bad guest. */
+++ /dev/null
-/******************************************************************************
- * blkback/cdrom.c
- *
- * Routines for managing cdrom watch and media-present attribute of a
- * cdrom type virtual block device (VBD).
- *
- * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
- * Copyright (c) 2007 Pat Campbell
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include "common.h"
-
-#undef DPRINTK
-#define DPRINTK(_f, _a...) \
- printk("(%s() file=%s, line=%d) " _f "\n", \
- __PRETTY_FUNCTION__, __FILE__ , __LINE__ , ##_a )
-
-
-#define MEDIA_PRESENT "media-present"
-
-static void cdrom_media_changed(struct xenbus_watch *, const char **, unsigned int);
-
-/**
- * Writes media-present=1 attribute for the given vbd device if not
- * already there
- */
-static int cdrom_xenstore_write_media_present(struct backend_info *be)
-{
- struct xenbus_device *dev = be->dev;
- struct xenbus_transaction xbt;
- int err;
- int media_present;
-
- err = xenbus_scanf(XBT_NIL, dev->nodename, MEDIA_PRESENT, "%d",
- &media_present);
- if (0 < err) {
- DPRINTK("already written err%d", err);
- return(0);
- }
- media_present = 1;
-
-again:
- err = xenbus_transaction_start(&xbt);
- if (err) {
- xenbus_dev_fatal(dev, err, "starting transaction");
- return(-1);
- }
-
- err = xenbus_printf(xbt, dev->nodename, MEDIA_PRESENT, "%d", media_present );
- if (err) {
- xenbus_dev_fatal(dev, err, "writing %s/%s",
- dev->nodename, MEDIA_PRESENT);
- goto abort;
- }
- err = xenbus_transaction_end(xbt, 0);
- if (err == -EAGAIN)
- goto again;
- if (err)
- xenbus_dev_fatal(dev, err, "ending transaction");
- return 0;
- abort:
- xenbus_transaction_end(xbt, 1);
- return -1;
-}
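/*
 * The again:/-EAGAIN loop above is the standard xenbus transaction
 * idiom: xenbus_transaction_end() returns -EAGAIN when the transaction
 * raced with another xenstore update, in which case the whole
 * read-modify-write sequence is simply retried.
 */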
-
-/**
- * Return non-zero if the vbd is a removable CDROM-type device.
- */
-static int cdrom_is_type(struct backend_info *be)
-{
- DPRINTK("type:%x", be->blkif->vbd.type );
- return (be->blkif->vbd.type & VDISK_CDROM)
- && (be->blkif->vbd.type & GENHD_FL_REMOVABLE);
-}
-
-/**
- * Add a xenstore watch on media-present if the vbd is a CDROM.
- */
-void cdrom_add_media_watch(struct backend_info *be)
-{
- struct xenbus_device *dev = be->dev;
- int err;
-
- DPRINTK("nodename:%s", dev->nodename);
- if (cdrom_is_type(be)) {
- DPRINTK("is a cdrom");
- if ( cdrom_xenstore_write_media_present(be) == 0 ) {
- DPRINTK( "xenstore wrote OK");
- err = xenbus_watch_path2(dev, dev->nodename, MEDIA_PRESENT,
- &be->backend_cdrom_watch,
- cdrom_media_changed);
- if (err)
- DPRINTK( "media_present watch add failed" );
- }
- }
-}
-
-/**
- * Callback received when the "media-present" xenstore node is changed
- */
-static void cdrom_media_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
-{
- int err;
- unsigned media_present;
- struct backend_info *be
- = container_of(watch, struct backend_info, backend_cdrom_watch);
- struct xenbus_device *dev = be->dev;
-
- if (!cdrom_is_type(be)) {
- DPRINTK("callback not for a cdrom" );
- return;
- }
-
- err = xenbus_scanf(XBT_NIL, dev->nodename, MEDIA_PRESENT, "%d",
- &media_present);
- if (err == 0 || err == -ENOENT) {
- DPRINTK("xenbus_read of cdrom media_present node error:%d",err);
- return;
- }
-
- if (media_present == 0)
- vbd_free(&be->blkif->vbd);
- else {
- char *p = strrchr(dev->otherend, '/') + 1;
- long handle = simple_strtoul(p, NULL, 0);
-
- if (!be->blkif->vbd.bdev) {
- err = vbd_create(be->blkif, handle, be->major, be->minor,
- !strchr(be->mode, 'w'), 1);
- if (err) {
- be->major = be->minor = 0;
- xenbus_dev_fatal(dev, err, "creating vbd structure");
- return;
- }
- }
- }
-}
wait_queue_head_t wq;
struct task_struct *xenblkd;
unsigned int waiting_reqs;
- struct request_queue *plug;
+ request_queue_t *plug;
/* statistics */
unsigned long st_print;
int st_wr_req;
int st_oo_req;
int st_br_req;
- int st_pk_req;
int st_rd_sect;
int st_wr_sect;
grant_ref_t shmem_ref;
} blkif_t;
-struct backend_info
-{
- struct xenbus_device *dev;
- blkif_t *blkif;
- struct xenbus_watch backend_watch;
- struct xenbus_watch backend_cdrom_watch;
- unsigned major;
- unsigned minor;
- char *mode;
-};
-
blkif_t *blkif_alloc(domid_t domid);
void blkif_disconnect(blkif_t *blkif);
void blkif_free(blkif_t *blkif);
void blkif_xenbus_init(void);
-irqreturn_t blkif_be_int(int irq, void *dev_id);
+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
int blkif_schedule(void *arg);
int blkback_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state);
-/* cdrom media change */
-void cdrom_add_media_watch(struct backend_info *be);
-
#endif /* __BLKIF__BACKEND__COMMON_H__ */
#include <xen/evtchn.h>
#include <linux/kthread.h>
-static struct kmem_cache *blkif_cachep;
+static kmem_cache_t *blkif_cachep;
blkif_t *blkif_alloc(domid_t domid)
{
void __init blkif_interface_init(void)
{
blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
- 0, 0, NULL);
+ 0, 0, NULL, NULL);
}
if ((operation != READ) && vbd->readonly)
goto out;
- if (vbd->bdev == NULL)
- goto out;
-
if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
goto out;
#include <linux/module.h>
#include <linux/kthread.h>
#include "common.h"
-#include "../core/domctl.h"
#undef DPRINTK
#define DPRINTK(fmt, args...) \
pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", \
__FUNCTION__, __LINE__, ##args)
+struct backend_info
+{
+ struct xenbus_device *dev;
+ blkif_t *blkif;
+ struct xenbus_watch backend_watch;
+ unsigned major;
+ unsigned minor;
+ char *mode;
+};
+
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
be->backend_watch.node = NULL;
}
- if (be->backend_cdrom_watch.node) {
- unregister_xenbus_watch(&be->backend_cdrom_watch);
- kfree(be->backend_cdrom_watch.node);
- be->backend_cdrom_watch.node = NULL;
- }
-
if (be->blkif) {
blkif_disconnect(be->blkif);
vbd_free(&be->blkif->vbd);
/* We're potentially connected now */
update_blkif_status(be->blkif);
-
- /* Add watch for cdrom media status if necessary */
- cdrom_add_media_watch(be);
}
}
be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
"%63s", protocol, NULL);
- if (err) {
- strcpy(protocol, "unspecified");
- be->blkif->blk_protocol = xen_guest_blkif_protocol(be->blkif->domid);
- }
+ if (err)
+ strcpy(protocol, "unspecified, assuming native");
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-#if 1 /* maintain compatibility with early sles10-sp1 and paravirt netware betas */
- else if (0 == strcmp(protocol, "1"))
- be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
- else if (0 == strcmp(protocol, "2"))
- be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-#endif
else {
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -1;
static struct xenbus_driver blkback = {
.name = "vbd",
+ .owner = THIS_MODULE,
.ids = blkback_ids,
.probe = blkback_probe,
.remove = blkback_remove,
void blkif_xenbus_init(void)
{
- if (xenbus_register_backend(&blkback))
- BUG();
+ xenbus_register_backend(&blkback);
}
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) := xenblk.o
-xenblk-objs := blkfront.o vbd.o vcd.o
+xenblk-objs := blkfront.o vbd.o
#include <linux/cdrom.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <xen/evtchn.h>
#include <xen/xenbus.h>
static void kick_pending_request_queues(struct blkfront_info *);
-static irqreturn_t blkif_int(int irq, void *dev_id);
-static void blkif_restart_queue(struct work_struct *arg);
+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
+static void blkif_restart_queue(void *arg);
static void blkif_recover(struct blkfront_info *);
static void blkif_completion(struct blk_shadow *);
static void blkif_free(struct blkfront_info *, int);
info->xbdev = dev;
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
- INIT_WORK(&info->work, blkif_restart_queue);
+ INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
for (i = 0; i < BLK_RING_SIZE; i++)
info->shadow[i].req.id = i+1;
SHARED_RING_INIT(sring);
FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
- sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-
err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
if (err < 0) {
free_page((unsigned long)sring);
info->ring_ref = err;
err = bind_listening_port_to_irqhandler(
- dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info);
+ dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
if (err <= 0) {
xenbus_dev_fatal(dev, err,
"bind_listening_port_to_irqhandler");
break;
case XenbusStateClosing:
- if (!info->gd)
- break;
- bd = bdget_disk(info->gd, 0);
+ bd = bdget(info->dev);
if (bd == NULL)
xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
add_disk(info->gd);
info->is_ready = 1;
-
- register_vcd(info);
}
/**
xlvbd_sysfs_delif(info);
- unregister_vcd(info);
-
xlvbd_del(info);
out:
}
}
-static void blkif_restart_queue(struct work_struct *arg)
+static void blkif_restart_queue(void *arg)
{
- struct blkfront_info *info = container_of(arg, struct blkfront_info, work);
+ struct blkfront_info *info = (struct blkfront_info *)arg;
spin_lock_irq(&blkif_io_lock);
if (info->connected == BLKIF_STATE_CONNECTED)
kick_pending_request_queues(info);
struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn;
blkif_request_t *ring_req;
+ struct bio *bio;
+ struct bio_vec *bvec;
+ int idx;
unsigned long id;
unsigned int fsect, lsect;
- int i, ref;
+ int ref;
grant_ref_t gref_head;
- struct scatterlist *sg;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
BLKIF_OP_WRITE : BLKIF_OP_READ;
if (blk_barrier_rq(req))
ring_req->operation = BLKIF_OP_WRITE_BARRIER;
- if (blk_pc_request(req))
- ring_req->operation = BLKIF_OP_PACKET;
-
- ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
- BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
-
- for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
- buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT;
- fsect = sg->offset >> 9;
- lsect = fsect + (sg->length >> 9) - 1;
- /* install a grant reference. */
- ref = gnttab_claim_grant_reference(&gref_head);
- BUG_ON(ref == -ENOSPC);
-
- gnttab_grant_foreign_access_ref(
- ref,
- info->xbdev->otherend_id,
- buffer_mfn,
- rq_data_dir(req) ? GTF_readonly : 0 );
-
- info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
- ring_req->seg[i] =
- (struct blkif_request_segment) {
- .gref = ref,
- .first_sect = fsect,
- .last_sect = lsect };
+
+ ring_req->nr_segments = 0;
+ rq_for_each_bio (bio, req) {
+ bio_for_each_segment (bvec, bio, idx) {
+ BUG_ON(ring_req->nr_segments
+ == BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
+ fsect = bvec->bv_offset >> 9;
+ lsect = fsect + (bvec->bv_len >> 9) - 1;
+ /* install a grant reference. */
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ gnttab_grant_foreign_access_ref(
+ ref,
+ info->xbdev->otherend_id,
+ buffer_mfn,
+ rq_data_dir(req) ? GTF_readonly : 0 );
+
+ info->shadow[id].frame[ring_req->nr_segments] =
+ mfn_to_pfn(buffer_mfn);
+
+ ring_req->seg[ring_req->nr_segments] =
+ (struct blkif_request_segment) {
+ .gref = ref,
+ .first_sect = fsect,
+ .last_sect = lsect };
+
+ ring_req->nr_segments++;
+ }
}
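/*
 * For each segment the loop above claims a grant reference and grants
 * the backend access to the frame backing it (read-only for writes,
 * since the backend only reads the data); the shadow entry records the
 * pfn so the grant can presumably be revoked, and the request replayed
 * if needed, once the response arrives.
 */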
info->ring.req_prod_pvt++;
* do_blkif_request
* read a block; request is in a request queue
*/
-void do_blkif_request(struct request_queue *rq)
+void do_blkif_request(request_queue_t *rq)
{
struct blkfront_info *info = NULL;
struct request *req;
while ((req = elv_next_request(rq)) != NULL) {
info = req->rq_disk->private_data;
- if (!blk_fs_request(req) && !blk_pc_request(req)) {
+ if (!blk_fs_request(req)) {
end_request(req, 0);
continue;
}
}
-static irqreturn_t blkif_int(int irq, void *dev_id)
+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
struct request *req;
blkif_response_t *bret;
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
+ int uptodate;
spin_lock_irqsave(&blkif_io_lock, flags);
ADD_ID_TO_FREELIST(info, id);
- ret = bret->status == BLKIF_RSP_OKAY ? 0 : -EIO;
+ uptodate = (bret->status == BLKIF_RSP_OKAY);
switch (bret->operation) {
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk("blkfront: %s: write barrier op failed\n",
info->gd->disk_name);
- ret = -EOPNOTSUPP;
+ uptodate = -EOPNOTSUPP;
info->feature_barrier = 0;
xlvbd_barrier(info);
}
/* fall through */
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
- case BLKIF_OP_PACKET:
if (unlikely(bret->status != BLKIF_RSP_OKAY))
DPRINTK("Bad return from blkdev data "
"request: %x\n", bret->status);
- ret = __blk_end_request(req, ret, blk_rq_bytes(req));
+ ret = end_that_request_first(req, uptodate,
+ req->hard_nr_sectors);
BUG_ON(ret);
+ end_that_request_last(req, uptodate);
break;
default:
BUG();
static struct xenbus_driver blkfront = {
.name = "vbd",
+ .owner = THIS_MODULE,
.ids = blkfront_ids,
.probe = blkfront_probe,
.remove = blkfront_remove,
struct blkfront_info
{
struct xenbus_device *xbdev;
+ dev_t dev;
struct gendisk *gd;
int vdevice;
blkif_vdev_t handle;
int connected;
int ring_ref;
blkif_front_ring_t ring;
- struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int irq;
struct xlbd_major_info *mi;
- struct request_queue *rq;
+ request_queue_t *rq;
struct work_struct work;
struct gnttab_free_callback callback;
struct blk_shadow shadow[BLK_RING_SIZE];
extern int blkif_getgeo(struct block_device *, struct hd_geometry *);
extern int blkif_check(dev_t dev);
extern int blkif_revalidate(dev_t dev);
-extern void do_blkif_request (struct request_queue *rq);
+extern void do_blkif_request (request_queue_t *rq);
/* Virtual block-device subsystem. */
/* Note that xlvbd_add doesn't call add_disk for you: you're expected
}
#endif
-/* Virtual cdrom block-device */
-extern void register_vcd(struct blkfront_info *info);
-extern void unregister_vcd(struct blkfront_info *info);
-
#endif /* __XEN_DRIVERS_BLOCK_H__ */
static int
xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
- struct request_queue *rq;
+ request_queue_t *rq;
rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
if (rq == NULL)
return 0;
}
-int
-xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
- u16 sector_size, struct blkfront_info *info)
+static int
+xlvbd_alloc_gendisk(int major, int minor, blkif_sector_t capacity, int vdevice,
+ u16 vdisk_info, u16 sector_size,
+ struct blkfront_info *info)
{
- int major, minor;
struct gendisk *gd;
struct xlbd_major_info *mi;
int nr_minors = 1;
int err = -ENODEV;
unsigned int offset;
- if ((vdevice>>EXT_SHIFT) > 1) {
- /* this is above the extended range; something is wrong */
- printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice);
- return -ENODEV;
- }
-
- if (!VDEV_IS_EXTENDED(vdevice)) {
- major = BLKIF_MAJOR(vdevice);
- minor = BLKIF_MINOR(vdevice);
- }
- else {
- major = 202;
- minor = BLKIF_MINOR_EXT(vdevice);
- }
-
BUG_ON(info->gd != NULL);
BUG_ON(info->mi != NULL);
BUG_ON(info->rq != NULL);
goto out;
info->mi = mi;
- if (!(vdisk_info & VDISK_CDROM) &&
- (minor & ((1 << mi->type->partn_shift) - 1)) == 0)
+ if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
nr_minors = 1 << mi->type->partn_shift;
gd = alloc_disk(nr_minors);
offset = mi->index * mi->type->disks_per_major +
(minor >> mi->type->partn_shift);
- if (nr_minors > 1 || (vdisk_info & VDISK_CDROM)) {
+ if (nr_minors > 1) {
if (offset < 26) {
sprintf(gd->disk_name, "%s%c",
mi->type->diskname, 'a' + offset );
return err;
}
+int
+xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
+ u16 sector_size, struct blkfront_info *info)
+{
+ struct block_device *bd;
+ int err = 0;
+ int major, minor;
+
+ if ((vdevice>>EXT_SHIFT) > 1) {
+ /* this is above the extended range; something is wrong */
+ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice);
+ return -ENODEV;
+ }
+
+ if (!VDEV_IS_EXTENDED(vdevice)) {
+ major = BLKIF_MAJOR(vdevice);
+ minor = BLKIF_MINOR(vdevice);
+ }
+ else {
+ major = 202;
+ minor = BLKIF_MINOR_EXT(vdevice);
+ }
+
+ info->dev = MKDEV(major, minor);
+ bd = bdget(info->dev);
+ if (bd == NULL)
+ return -ENODEV;
+
+ err = xlvbd_alloc_gendisk(major, minor, capacity, vdevice, vdisk_info,
+ sector_size, info);
+
+ bdput(bd);
+ return err;
+}
+
void
xlvbd_del(struct blkfront_info *info)
{
+++ /dev/null
-/*******************************************************************************
-* vcd.c
-*
-* Implements CDROM cmd packet passing between frontend guest and backend driver.
-*
-* Copyright (c) 2008, Pat Campbell plc@novell.com
-*
-* Permission is hereby granted, free of charge, to any person obtaining a copy
-* of this source file (the "Software"), to deal in the Software without
-* restriction, including without limitation the rights to use, copy, modify,
-* merge, publish, distribute, sublicense, and/or sell copies of the Software,
-* and to permit persons to whom the Software is furnished to do so, subject to
-* the following conditions:
-*
-* The above copyright notice and this permission notice shall be included in
-* all copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-* IN THE SOFTWARE.
-*/
-
-#define REVISION "$Revision: 1.0 $"
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/list.h>
-#include <linux/cdrom.h>
-#include <xen/interface/io/cdromif.h>
-#include "block.h"
-
-/* List of cdrom_device_info, can have as many as blkfront supports */
-struct vcd_disk {
- struct list_head vcd_entry;
- struct cdrom_device_info vcd_cdrom_info;
- spinlock_t vcd_cdrom_info_lock;
-};
-static LIST_HEAD(vcd_disks);
-static DEFINE_SPINLOCK(vcd_disks_lock);
-
-static struct vcd_disk * xencdrom_get_list_entry(struct gendisk *disk)
-{
- struct vcd_disk * ret_vcd = NULL;
- struct vcd_disk * vcd;
-
- spin_lock(&vcd_disks_lock);
- list_for_each_entry(vcd, &vcd_disks, vcd_entry) {
- if (vcd->vcd_cdrom_info.disk == disk) {
- spin_lock(&vcd->vcd_cdrom_info_lock);
- ret_vcd = vcd;
- break;
- }
- }
- spin_unlock(&vcd_disks_lock);
- return ret_vcd;
-}
-
-static void submit_message(struct blkfront_info *info, void * sp)
-{
- struct request *req = NULL;
-
- req = blk_get_request(info->rq, READ, __GFP_WAIT);
- if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT))
- goto out;
-
- req->rq_disk = info->gd;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
- req->cmd_type = REQ_TYPE_BLOCK_PC;
- req->cmd_flags |= REQ_NOMERGE;
-#else
- req->flags |= REQ_BLOCK_PC;
-#endif
- req->sector = 0;
- req->nr_sectors = 1;
- req->timeout = 60*HZ;
-
- blk_execute_rq(req->q, info->gd, req, 1);
-
-out:
- blk_put_request(req);
-}
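/*
 * submit_message() is the synchronous transport for the xen_cdrom_*
 * packets below: the shared page is attached to a packet-command
 * (BLOCK_PC) request and pushed through the regular blkfront ring via
 * blk_execute_rq(), where the frontend maps it to BLKIF_OP_PACKET.
 */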
-
-static int submit_cdrom_cmd(struct blkfront_info *info,
- struct packet_command * cgc)
-{
- int ret = 0;
- struct page *page;
- size_t size;
- union xen_block_packet *sp;
- struct xen_cdrom_packet *xcp;
- struct vcd_generic_command * vgc;
-
- if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) {
- printk(KERN_WARNING "%s() Packet buffer length is too large\n", __func__);
- return -EIO;
- }
-
- page = alloc_page(GFP_NOIO);
- if (!page) {
- printk(KERN_CRIT "%s() Unable to allocate page\n", __func__);
- return -ENOMEM;
- }
-
- size = PAGE_SIZE;
- memset(page_address(page), 0, PAGE_SIZE);
- sp = page_address(page);
- xcp = &(sp->xcp);
- xcp->type = XEN_TYPE_CDROM_PACKET;
- xcp->payload_offset = PACKET_PAYLOAD_OFFSET;
-
- vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset);
- memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE);
- vgc->stat = cgc->stat;
- vgc->data_direction = cgc->data_direction;
- vgc->quiet = cgc->quiet;
- vgc->timeout = cgc->timeout;
- if (cgc->sense) {
- vgc->sense_offset = PACKET_SENSE_OFFSET;
- memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense));
- }
- if (cgc->buffer) {
- vgc->buffer_offset = PACKET_BUFFER_OFFSET;
- memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen);
- vgc->buflen = cgc->buflen;
- }
-
- submit_message(info,sp);
-
- if (xcp->ret)
- ret = xcp->err;
-
- if (cgc->sense) {
- memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense));
- }
- if (cgc->buffer && cgc->buflen) {
- memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen);
- }
-
- __free_page(page);
- return ret;
-}
-
-
-static int xencdrom_open(struct cdrom_device_info *cdi, int purpose)
-{
- int ret = 0;
- struct page *page;
- struct blkfront_info *info;
- union xen_block_packet *sp;
- struct xen_cdrom_open *xco;
-
- info = cdi->disk->private_data;
-
- if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) {
- return -EIO;
- }
-
- page = alloc_page(GFP_NOIO);
- if (!page) {
- printk(KERN_CRIT "%s() Unable to allocate page\n", __func__);
- return -ENOMEM;
- }
-
- memset(page_address(page), 0, PAGE_SIZE);
- sp = page_address(page);
- xco = &(sp->xco);
- xco->type = XEN_TYPE_CDROM_OPEN;
- xco->payload_offset = sizeof(struct xen_cdrom_open);
- strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend);
-
- submit_message(info,sp);
-
- if (xco->ret) {
- ret = xco->err;
- goto out;
- }
-
- if (xco->media_present)
- set_capacity(cdi->disk, xco->sectors);
-
-out:
- __free_page(page);
- return ret;
-}
-
-static void xencdrom_release(struct cdrom_device_info *cdi)
-{
-}
-
-static int xencdrom_media_changed(struct cdrom_device_info *cdi, int disc_nr)
-{
- int ret;
- struct page *page;
- struct blkfront_info *info;
- union xen_block_packet *sp;
- struct xen_cdrom_media_changed *xcmc;
-
- info = cdi->disk->private_data;
-
- page = alloc_page(GFP_NOIO);
- if (!page) {
- printk(KERN_CRIT "%s() Unable to allocate page\n", __func__);
- return -ENOMEM;
- }
-
- memset(page_address(page), 0, PAGE_SIZE);
- sp = page_address(page);
- xcmc = &(sp->xcmc);
- xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED;
- submit_message(info,sp);
- ret = xcmc->media_changed;
-
- __free_page(page);
-
- return ret;
-}
-
-static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position)
-{
- int ret;
- struct packet_command cgc;
- struct blkfront_info *info;
-
- info = cdi->disk->private_data;
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_START_STOP_UNIT;
- if (position)
- cgc.cmd[4] = 2;
- else
- cgc.cmd[4] = 3;
- ret = submit_cdrom_cmd(info, &cgc);
- return ret;
-}
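/*
 * cmd[4] encodes the MMC START STOP UNIT LoEj/Start bits: 0x02
 * (LoEj=1, Start=0) ejects the medium and 0x03 (LoEj=1, Start=1)
 * loads it, hence 2 for "open tray" and 3 for "close tray" above.
 */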
-
-static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock)
-{
- int ret = 0;
- struct blkfront_info *info;
- struct packet_command cgc;
-
- info = cdi->disk->private_data;
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
- cgc.cmd[4] = lock;
- ret = submit_cdrom_cmd(info, &cgc);
- return ret;
-}
-
-static int xencdrom_packet(struct cdrom_device_info *cdi,
- struct packet_command *cgc)
-{
- int ret = -EIO;
- struct blkfront_info *info;
-
- info = cdi->disk->private_data;
- ret = submit_cdrom_cmd(info, cgc);
- cgc->stat = ret;
- return ret;
-}
-
-static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
- void *arg)
-{
- return -EINVAL;
-}
-
-/* Query backend to see if CDROM packets are supported */
-static int xencdrom_supported(struct blkfront_info *info)
-{
- struct page *page;
- union xen_block_packet *sp;
- struct xen_cdrom_support *xcs;
-
- page = alloc_page(GFP_NOIO);
- if (!page) {
- printk(KERN_CRIT "%s() Unable to allocate page\n", __func__);
- return -ENOMEM;
- }
-
- memset(page_address(page), 0, PAGE_SIZE);
- sp = page_address(page);
- xcs = &(sp->xcs);
- xcs->type = XEN_TYPE_CDROM_SUPPORT;
- submit_message(info,sp);
- return xcs->supported;
-}
-
-static struct cdrom_device_ops xencdrom_dops = {
- .open = xencdrom_open,
- .release = xencdrom_release,
- .media_changed = xencdrom_media_changed,
- .tray_move = xencdrom_tray_move,
- .lock_door = xencdrom_lock_door,
- .generic_packet = xencdrom_packet,
- .audio_ioctl = xencdrom_audio_ioctl,
- .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \
- CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \
- CDC_CD_R),
- .n_minors = 1,
-};
-
-static int xencdrom_block_open(struct inode *inode, struct file *file)
-{
- struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
- struct vcd_disk * vcd;
- int ret = 0;
-
- if ((vcd = xencdrom_get_list_entry(info->gd))) {
- ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file);
- info->users = vcd->vcd_cdrom_info.use_count;
- spin_unlock(&vcd->vcd_cdrom_info_lock);
- }
- return ret;
-}
-
-static int xencdrom_block_release(struct inode *inode, struct file *file)
-{
- struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
- struct vcd_disk * vcd;
- int ret = 0;
-
- if ((vcd = xencdrom_get_list_entry(info->gd))) {
- ret = cdrom_release(&vcd->vcd_cdrom_info, file);
- spin_unlock(&vcd->vcd_cdrom_info_lock);
- if (vcd->vcd_cdrom_info.use_count == 0) {
- info->users = 1;
- blkif_release(inode, file);
- }
- }
- return ret;
-}
-
-static int xencdrom_block_ioctl(struct inode *inode, struct file *file,
- unsigned cmd, unsigned long arg)
-{
- struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
- struct vcd_disk * vcd;
- int ret = 0;
-
- if (!(vcd = xencdrom_get_list_entry(info->gd)))
- goto out;
-
- switch (cmd) {
- case 0x2285: /* SG_IO */
- ret = -ENOSYS;
- break;
- case CDROMEJECT:
- ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1);
- break;
- case CDROMCLOSETRAY:
- ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0);
- break;
- case CDROM_GET_CAPABILITY:
- ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask;
- break;
- case CDROM_SET_OPTIONS:
- ret = vcd->vcd_cdrom_info.options;
- break;
- case CDROM_SEND_PACKET:
- {
- struct packet_command * cgc = (struct packet_command *)arg;
- ret = submit_cdrom_cmd(info, cgc);
- }
- break;
- default:
- /* Not supported; add to the supported ioctls above if necessary */
- printk("%s():%d Unsupported IOCTL: %x\n", __func__, __LINE__, cmd);
- ret = -ENOTTY;
- break;
- }
- spin_unlock(&vcd->vcd_cdrom_info_lock);
-out:
- return ret;
-}
-
-/* Called as result of cdrom_open, vcd_cdrom_info_lock already held */
-static int xencdrom_block_media_changed(struct gendisk *disk)
-{
- struct vcd_disk * vcd;
- struct vcd_disk * ret_vcd = NULL;
- int ret = 0;
-
- spin_lock(&vcd_disks_lock);
- list_for_each_entry(vcd, &vcd_disks, vcd_entry) {
- if (vcd->vcd_cdrom_info.disk == disk) {
- ret_vcd = vcd;
- break;
- }
- }
- spin_unlock(&vcd_disks_lock);
- if (ret_vcd) {
- ret = cdrom_media_changed(&ret_vcd->vcd_cdrom_info);
- }
- return ret;
-}
-
-static struct block_device_operations xencdrom_bdops =
-{
- .owner = THIS_MODULE,
- .open = xencdrom_block_open,
- .release = xencdrom_block_release,
- .ioctl = xencdrom_block_ioctl,
- .media_changed = xencdrom_block_media_changed,
-};
-
-void register_vcd(struct blkfront_info *info)
-{
- struct gendisk * gd = info->gd;
- struct vcd_disk * vcd;
-
- /* Make sure this is for a CD device */
- if (!(gd->flags & GENHD_FL_CD))
- goto out;
-
- /* Make sure we have backend support */
- if (!xencdrom_supported(info)) {
- goto out;
- }
-
- /* Create new vcd_disk and fill in cdrom_info */
- vcd = (struct vcd_disk *)kzalloc(sizeof(struct vcd_disk), GFP_KERNEL);
- if (!vcd) {
- printk(KERN_INFO "%s(): Unable to allocate vcd struct!\n", __func__);
- goto out;
- }
- spin_lock_init(&vcd->vcd_cdrom_info_lock);
-
- vcd->vcd_cdrom_info.ops = &xencdrom_dops;
- vcd->vcd_cdrom_info.speed = 4;
- vcd->vcd_cdrom_info.capacity = 1;
- vcd->vcd_cdrom_info.options = 0;
- strcpy(vcd->vcd_cdrom_info.name, gd->disk_name);
- vcd->vcd_cdrom_info.mask = ( CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM |
- CDC_SELECT_DISC | CDC_SELECT_SPEED |
- CDC_MRW | CDC_MRW_W | CDC_RAM);
-
- if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) {
- printk(KERN_WARNING "%s() Cannot register blkdev as a cdrom %d!\n", __func__,
- gd->major);
- goto err_out;
- }
- xencdrom_bdops.owner = gd->fops->owner;
- gd->fops = &xencdrom_bdops;
- vcd->vcd_cdrom_info.disk = gd;
-
- spin_lock(&vcd_disks_lock);
- list_add(&(vcd->vcd_entry), &vcd_disks);
- spin_unlock(&vcd_disks_lock);
-out:
- return;
-err_out:
- kfree(vcd);
-}
-
-void unregister_vcd(struct blkfront_info *info) {
- struct gendisk * gd = info->gd;
- struct vcd_disk * vcd;
-
- spin_lock(&vcd_disks_lock);
- list_for_each_entry(vcd, &vcd_disks, vcd_entry) {
- if (vcd->vcd_cdrom_info.disk == gd) {
- spin_lock(&vcd->vcd_cdrom_info_lock);
- unregister_cdrom(&vcd->vcd_cdrom_info);
- list_del(&vcd->vcd_entry);
- spin_unlock(&vcd->vcd_cdrom_info_lock);
- kfree(vcd);
- break;
- }
- }
- spin_unlock(&vcd_disks_lock);
-}
-
LINUXINCLUDE += -I../xen/include/public/io
-obj-$(CONFIG_XEN_BLKDEV_TAP) := blktap.o
+obj-$(CONFIG_XEN_BLKDEV_TAP) := xenblktap.o
-blktap-y := xenbus.o interface.o blocktap.o
+xenblktap-y := xenbus.o interface.o blktap.o
#include <linux/spinlock.h>
#include <linux/kthread.h>
-#include <linux/freezer.h>
#include <linux/list.h>
#include <asm/hypervisor.h>
#include "common.h"
#include <linux/gfp.h>
#include <linux/poll.h>
#include <linux/delay.h>
-#include <linux/nsproxy.h>
#include <asm/tlbflush.h>
#define MAX_TAP_DEV 256 /*the maximum number of tapdisk ring devices */
unsigned long mode; /*current switching mode */
int minor; /*Minor number for tapdisk device */
pid_t pid; /*tapdisk process id */
- struct pid_namespace *pid_ns; /*... and its corresponding namespace */
enum { RUNNING, CLEANSHUTDOWN } status; /*Detect a clean userspace
shutdown */
unsigned long *idx_map; /*Record the user ring id to kern
* BLKTAP VM OPS
*/
-static int blktap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static struct page *blktap_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int *type)
{
/*
* if the page has not been mapped in by the driver then return
- * VM_FAULT_SIGBUS to the domain.
+ * NOPAGE_SIGBUS to the domain.
*/
- return VM_FAULT_SIGBUS;
+ return NOPAGE_SIGBUS;
}
static pte_t blktap_clear_pte(struct vm_area_struct *vma,
* if vm_file is NULL (meaning mmap failed and we have nothing to do)
*/
if (uvaddr < uvstart || vma->vm_file == NULL)
- return xen_ptep_get_and_clear_full(vma, uvaddr, ptep,
- is_fullmm);
+ return ptep_get_and_clear_full(vma->vm_mm, uvaddr,
+ ptep, is_fullmm);
info = vma->vm_file->private_data;
map = vma->vm_private_data;
BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap));
/* USING SHADOW PAGE TABLES. */
- copy = xen_ptep_get_and_clear_full(vma, uvaddr, ptep,
- is_fullmm);
+ copy = ptep_get_and_clear_full(vma->vm_mm, uvaddr, ptep,
+ is_fullmm);
}
if (count) {
}
struct vm_operations_struct blktap_vm_ops = {
- fault: blktap_fault,
+ nopage: blktap_nopage,
zap_pte: blktap_clear_pte,
};
tapfds[minor] = info;
if ((class = get_xen_class()) != NULL)
- device_create(class, NULL, MKDEV(blktap_major, minor),
- NULL, "blktap%d", minor);
+ class_device_create(class, NULL,
+ MKDEV(blktap_major, minor), NULL,
+ "blktap%d", minor);
}
out:
return;
if (info->pid > 0) {
- ptask = find_task_by_pid_ns(info->pid, info->pid_ns);
+ ptask = find_task_by_pid(info->pid);
if (ptask)
info->status = CLEANSHUTDOWN;
}
{
if (info) {
info->pid = (pid_t)arg;
- info->pid_ns = current->nsproxy->pid_ns;
- DPRINTK("blktap: pid received %p:%d\n",
- info->pid_ns, info->pid);
+ DPRINTK("blktap: pid received %d\n",
+ info->pid);
}
return 0;
}
static void mmap_req_del(int mmap)
{
- assert_spin_locked(&pending_free_lock);
+ BUG_ON(!spin_is_locked(&pending_free_lock));
kfree(pending_reqs[mmap]);
pending_reqs[mmap] = NULL;
static void print_stats(blkif_t *blkif)
{
- printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | pk %4d\n",
+ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d\n",
current->comm, blkif->st_oo_req,
- blkif->st_rd_req, blkif->st_wr_req, blkif->st_pk_req);
+ blkif->st_rd_req, blkif->st_wr_req);
blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
blkif->st_rd_req = 0;
blkif->st_wr_req = 0;
blkif->st_oo_req = 0;
- blkif->st_pk_req = 0;
}
int tap_blkif_schedule(void *arg)
wake_up(&blkif->wq);
}
-irqreturn_t tap_blkif_be_int(int irq, void *dev_id)
+irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
blkif_notify_work(dev_id);
return IRQ_HANDLED;
dispatch_rw_block_io(blkif, &req, pending_req);
break;
- case BLKIF_OP_WRITE_BARRIER:
- /* TODO Some counter? */
- /* Fall through */
case BLKIF_OP_WRITE:
blkif->st_wr_req++;
dispatch_rw_block_io(blkif, &req, pending_req);
break;
- case BLKIF_OP_PACKET:
- blkif->st_pk_req++;
- dispatch_rw_block_io(blkif, &req, pending_req);
- break;
-
default:
/* A good sign something is wrong: sleep for a while to
* avoid excessive CPU consumption by a bad guest. */
pending_req_t *pending_req)
{
extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
- int op, operation;
+ int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
unsigned int nseg;
int ret, i, nr_sects = 0;
uint16_t mmap_idx = pending_req->mem_idx;
struct mm_struct *mm;
- switch (req->operation) {
- case BLKIF_OP_PACKET:
- /* Fall through */
- case BLKIF_OP_READ:
- operation = READ;
- break;
- case BLKIF_OP_WRITE:
- operation = WRITE;
- break;
- case BLKIF_OP_WRITE_BARRIER:
- operation = WRITE_BARRIER;
- break;
- default:
- operation = 0; /* make gcc happy */
- BUG();
- }
-
if (blkif->dev_num < 0 || blkif->dev_num > MAX_TAP_DEV)
goto fail_response;
pending_req->blkif = blkif;
pending_req->id = req->id;
- pending_req->operation = req->operation;
+ pending_req->operation = operation;
pending_req->status = BLKIF_RSP_OKAY;
pending_req->nr_pages = nseg;
op = 0;
kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
flags = GNTMAP_host_map;
- if (operation != READ)
+ if (operation == WRITE)
flags |= GNTMAP_readonly;
gnttab_set_map_op(&map[op], kvaddr, flags,
req->seg[i].gref, blkif->domid);
flags = GNTMAP_host_map | GNTMAP_application_map
| GNTMAP_contains_pte;
- if (operation != READ)
+ if (operation == WRITE)
flags |= GNTMAP_readonly;
gnttab_set_map_op(&map[op], ptep, flags,
req->seg[i].gref, blkif->domid);
* We only create the device when a request of a new device is
* made.
*/
- device_create(class, NULL, MKDEV(blktap_major, 0), NULL,
- "blktap0");
+ class_device_create(class, NULL,
+ MKDEV(blktap_major, 0), NULL,
+ "blktap0");
} else {
/* this is bad, but not fatal */
WPRINTK("blktap: sysfs xen_class not created\n");
+++ /dev/null
-#include "blktap.c"
wait_queue_head_t wq;
struct task_struct *xenblkd;
unsigned int waiting_reqs;
- struct request_queue *plug;
+ request_queue_t *plug;
/* statistics */
unsigned long st_print;
int st_rd_req;
int st_wr_req;
int st_oo_req;
- int st_pk_req;
int st_rd_sect;
int st_wr_sect;
void tap_blkif_xenbus_init(void);
-irqreturn_t tap_blkif_be_int(int irq, void *dev_id);
+irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
int tap_blkif_schedule(void *arg);
int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif);
#include "common.h"
#include <xen/evtchn.h>
-static struct kmem_cache *blkif_cachep;
+static kmem_cache_t *blkif_cachep;
blkif_t *tap_alloc_blkif(domid_t domid)
{
void __init tap_blkif_interface_init(void)
{
blkif_cachep = kmem_cache_create("blktapif_cache", sizeof(blkif_t),
- 0, 0, NULL);
+ 0, 0, NULL, NULL);
}
#include <linux/kthread.h>
#include <xen/xenbus.h>
#include "common.h"
-#include "../core/domctl.h"
struct backend_info
int err;
struct xenbus_device *dev = be->dev;
- struct xenbus_transaction xbt;
- /* Write feature-barrier to xenstore */
-again:
- err = xenbus_transaction_start(&xbt);
- if (err) {
- xenbus_dev_fatal(dev, err, "starting transaction");
- return;
- }
-
- err = xenbus_printf(xbt, dev->nodename, "feature-barrier", "1");
- if (err) {
- xenbus_dev_fatal(dev, err, "writing feature-barrier");
- xenbus_transaction_end(xbt, 1);
- return;
- }
-
- err = xenbus_transaction_end(xbt, 0);
- if (err == -EAGAIN)
- goto again;
-
- /* Switch state */
err = xenbus_switch_state(dev, XenbusStateConnected);
if (err)
xenbus_dev_fatal(dev, err, "switching to Connected state",
be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
"%63s", protocol, NULL);
- if (err) {
- strcpy(protocol, "unspecified");
- be->blkif->blk_protocol = xen_guest_blkif_protocol(be->blkif->domid);
- }
+ if (err)
+ strcpy(protocol, "unspecified, assuming native");
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-#if 1 /* maintain compatibility with early sles10-sp1 and paravirt netware betas */
- else if (0 == strcmp(protocol, "1"))
- be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
- else if (0 == strcmp(protocol, "2"))
- be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-#endif
else {
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -1;
static struct xenbus_driver blktap = {
.name = "tap",
+ .owner = THIS_MODULE,
.ids = blktap_ids,
.probe = blktap_probe,
.remove = blktap_remove,
void tap_blkif_xenbus_init(void)
{
- if (xenbus_register_backend(&blktap))
- BUG();
+ xenbus_register_backend(&blktap);
}
# Makefile for the linux kernel.
#
-obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o firmware.o domctl.o
+obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o firmware.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_PROC_FS) += xen_proc.o
+++ /dev/null
-/*
- * !!! dirty hack alert !!!
- *
- * Problem: old guests kernels don't have a "protocol" node
- * in the frontend xenstore directory, so mixing
- * 32 and 64bit domains doesn't work.
- *
- * Upstream plans to solve this in the tools, by letting them
- * create a protocol node. Which certainly makes sense.
- * But it isn't trivial and isn't done yet. Too bad.
- *
- * So for the time being we use the get_address_size domctl
- * hypercall for a pretty good guess. Not nice as the domctl
- * hypercall isn't supposed to be used by the kernel. Because
- * we don't want to have dependencies between dom0 kernel and
- * xen kernel versions. Now we have one. Ouch.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/hypervisor.h>
-#include <xen/blkif.h>
-
-#include "domctl.h"
-
-/* stuff copied from xen/interface/domctl.h, which we can't
- * include directly for the reasons outlined above .... */
-
-#define XEN_DOMCTL_get_address_size 36
-typedef struct xen_domctl_address_size {
- uint32_t size;
-} xen_domctl_address_size_t;
-
-typedef __attribute__((aligned(8))) uint64_t uint64_aligned_t;
-
-union xen_domctl {
- /* v4: sles10 sp1: xen 3.0.4 + 32-on-64 patches */
- struct {
- uint32_t cmd;
- uint32_t interface_version;
- domid_t domain;
- union {
- /* left out lots of other struct xen_domctl_foobar */
- struct xen_domctl_address_size address_size;
- uint64_t dummy_align;
- uint8_t dummy_pad[128];
- } u;
- } v4;
-
- /* v5: upstream: xen 3.1 */
- struct {
- uint32_t cmd;
- uint32_t interface_version;
- domid_t domain;
- union {
- struct xen_domctl_address_size address_size;
- uint64_aligned_t dummy_align;
- uint8_t dummy_pad[128];
- } u;
- } v5;
-};
-
-/* The actual code comes here */
-
-static inline int hypervisor_domctl(void *domctl)
-{
- return _hypercall1(int, domctl, domctl);
-}
-
-int xen_guest_address_size(int domid)
-{
- union xen_domctl domctl;
- int low, ret;
-
-#define guest_address_size(ver) do { \
- memset(&domctl, 0, sizeof(domctl)); \
- domctl.v##ver.cmd = XEN_DOMCTL_get_address_size; \
- domctl.v##ver.interface_version = low = ver; \
- domctl.v##ver.domain = domid; \
- ret = hypervisor_domctl(&domctl) ?: domctl.v##ver.u.address_size.size; \
- if (ret == 32 || ret == 64) { \
- printk("v" #ver " domctl worked ok: dom%d is %d-bit\n", \
- domid, ret); \
- return ret; \
- } \
-} while (0)
-
- guest_address_size(5);
-#if CONFIG_XEN_COMPAT < 0x030100
- guest_address_size(4);
-#endif
-
- ret = BITS_PER_LONG;
- printk("v%d...5 domctls failed, assuming dom%d is native: %d\n",
- low, domid, ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(xen_guest_address_size);
-
-int xen_guest_blkif_protocol(int domid)
-{
- int address_size = xen_guest_address_size(domid);
-
- if (address_size == BITS_PER_LONG)
- return BLKIF_PROTOCOL_NATIVE;
- if (address_size == 32)
- return BLKIF_PROTOCOL_X86_32;
- if (address_size == 64)
- return BLKIF_PROTOCOL_X86_64;
- return BLKIF_PROTOCOL_NATIVE;
-}
-EXPORT_SYMBOL_GPL(xen_guest_blkif_protocol);
+++ /dev/null
-int xen_guest_address_size(int domid);
-int xen_guest_blkif_protocol(int domid);
if (IS_ERR(eth_name)) {
/* Probably means not present */
- DPRINTK("%s: no match due to xenbus_read accel error %ld\n",
+ DPRINTK("%s: no match due to xenbus_read accel error %d\n",
__FUNCTION__, PTR_ERR(eth_name));
return 0;
} else {
/* Enforce draining of the transmit queue. */
struct timer_list tx_queue_timeout;
+ /* Statistics */
+ int nr_copied_skbs;
+
/* Miscellaneous private stuff. */
struct list_head list; /* scheduling list */
atomic_t refcnt;
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
struct net_device_stats *netif_be_get_stats(struct net_device *dev);
-irqreturn_t netif_be_int(int irq, void *dev_id);
+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
static inline int netbk_can_queue(struct net_device *dev)
{
* blocked.
*/
static unsigned long netbk_queue_length = 32;
-module_param_named(queue_length, netbk_queue_length, ulong, 0);
+module_param_named(queue_length, netbk_queue_length, ulong, 0644);
static void __netif_up(netif_t *netif)
{
return ethtool_op_set_tso(dev, data);
}
+static void netbk_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "netbk");
+ strcpy(info->bus_info, dev->class_dev.dev->bus_id);
+}
+
+static const struct netif_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} netbk_stats[] = {
+ { "copied_skbs", offsetof(netif_t, nr_copied_skbs) },
+};
+
+static int netbk_get_stats_count(struct net_device *dev)
+{
+ return ARRAY_SIZE(netbk_stats);
+}
+
+static void netbk_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ void *netif = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(netbk_stats); i++)
+ data[i] = *(int *)(netif + netbk_stats[i].offset);
+}
+
+static void netbk_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(netbk_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ netbk_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
static struct ethtool_ops network_ethtool_ops =
{
+ .get_drvinfo = netbk_get_drvinfo,
+
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = netbk_set_tso,
.get_link = ethtool_op_get_link,
+
+ .get_stats_count = netbk_get_stats_count,
+ .get_ethtool_stats = netbk_get_ethtool_stats,
+ .get_strings = netbk_get_strings,
};
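/*
 * With the hooks above wired up, the per-vif counter should be
 * readable from dom0 through the standard ethtool statistics query,
 * e.g. (illustrative; vif naming follows the usual domid.handle
 * scheme):
 *
 *	# ethtool -S vif1.0
 *	NIC statistics:
 *	     copied_skbs: 0
 */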
netif_t *netif_alloc(domid_t domid, unsigned int handle)
netif->dev->name, netif);
if (err < 0)
goto err_hypervisor;
- BUG_ON(err < DYNIRQ_BASE || err >= DYNIRQ_BASE + NR_DYNIRQS);
netif->irq = err;
disable_irq(netif->irq);
#include <net/dst.h>
#include <net/xfrm.h> /* secpath_reset() */
#include <asm/hypervisor.h> /* is_initial_xendomain() */
-#include <../net/core/kmap_skb.h> /* k{,un}map_skb_frag() */
static int nloopbacks = -1;
module_param(nloopbacks, int, 0);
np->stats.rx_bytes += skb->len;
np->stats.rx_packets++;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->ip_summed == CHECKSUM_HW) {
/* Defer checksum calculation. */
skb->proto_csum_blank = 1;
/* Must be a local packet: assert its integrity. */
char dev_name[IFNAMSIZ];
sprintf(dev_name, "vif0.%d", i);
- dev1 = dev_get_by_name(&init_net, dev_name);
+ dev1 = dev_get_by_name(dev_name);
sprintf(dev_name, "veth%d", i);
- dev2 = dev_get_by_name(&init_net, dev_name);
+ dev2 = dev_get_by_name(dev_name);
if (dev1 && dev2) {
unregister_netdev(dev2);
unregister_netdev(dev1);
static struct timer_list net_timer;
static struct timer_list netbk_tx_pending_timer;
-#define MAX_PENDING_REQS (1U << CONFIG_XEN_NETDEV_TX_SHIFT)
+#define MAX_PENDING_REQS 256
static struct sk_buff_head rx_queue;
goto err;
skb_reserve(nskb, 16 + NET_IP_ALIGN);
- headlen = skb_end_pointer(nskb) - nskb->data;
+ headlen = nskb->end - nskb->data;
if (headlen > skb_headlen(skb))
headlen = skb_headlen(skb);
ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
len -= copy;
}
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- offset = 0;
-#else
offset = nskb->data - skb->data;
-#endif
- nskb->transport_header = skb->transport_header + offset;
- nskb->network_header = skb->network_header + offset;
- nskb->mac_header = skb->mac_header + offset;
+ nskb->h.raw = skb->h.raw + offset;
+ nskb->nh.raw = skb->nh.raw + offset;
+ nskb->mac.raw = skb->mac.raw + offset;
return nskb;
{
static struct net_device *eth0_dev = NULL;
if (unlikely(eth0_dev == NULL))
- eth0_dev = __dev_get_by_name(&init_net, "eth0");
- netif_rx_schedule(eth0_dev, ???);
+ eth0_dev = __dev_get_by_name("eth0");
+ netif_rx_schedule(eth0_dev);
}
/*
* Add following to poll() function in NAPI driver (Tigon3 is example):
static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
- static DECLARE_BITMAP(rx_notify, NR_DYNIRQS);
-#if NR_DYNIRQS <= 0x10000
+ static unsigned char rx_notify[NR_IRQS];
static u16 notify_list[NET_RX_RING_SIZE];
-#else
- static int notify_list[NET_RX_RING_SIZE];
-#endif
static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
struct netrx_pending_operations npo = {
id = meta[npo.meta_cons].id;
flags = nr_frags ? NETRXF_more_data : 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
+ if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
flags |= NETRXF_csum_blank | NETRXF_data_validated;
else if (skb->proto_data_valid) /* remote but checksummed? */
flags |= NETRXF_data_validated;
nr_frags);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
- irq = netif->irq - DYNIRQ_BASE;
- if (ret && !__test_and_set_bit(irq, rx_notify))
+ irq = netif->irq;
+ if (ret && !rx_notify[irq]) {
+ rx_notify[irq] = 1;
notify_list[notify_nr++] = irq;
+ }
if (netif_queue_stopped(netif->dev) &&
netif_schedulable(netif) &&
npo.meta_cons += nr_frags + 1;
}
- if (notify_nr == 1) {
- irq = *notify_list;
- __clear_bit(irq, rx_notify);
- notify_remote_via_irq(irq + DYNIRQ_BASE);
- } else {
- for (count = ret = 0; ret < notify_nr; ++ret) {
- irq = notify_list[ret];
- __clear_bit(irq, rx_notify);
- if (!multi_notify_remote_via_irq(rx_mcl + count,
- irq + DYNIRQ_BASE))
- ++count;
- }
- if (HYPERVISOR_multicall(rx_mcl, count))
- BUG();
+ while (notify_nr != 0) {
+ irq = notify_list[--notify_nr];
+ rx_notify[irq] = 0;
+ notify_remote_via_irq(irq);
}
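/*
 * rx_notify[] deduplicates events across the batch: an irq is queued
 * on notify_list at most once per pass (see the rx_notify test where
 * responses are pushed above), so each frontend gets a single
 * notification no matter how many packets it received.
 */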
/* More work to do? */
if (time_after(inuse->alloc_time + HZ / 2, jiffies))
break;
- switch (copy_pending_req(inuse - pending_inuse)) {
+ pending_idx = inuse - pending_inuse;
+
+ pending_tx_info[pending_idx].netif->nr_copied_skbs++;
+
+ switch (copy_pending_req(pending_idx)) {
case 0:
list_move_tail(&inuse->list, &list);
continue;
net_tx_action_dealloc();
mop = tx_map_ops;
- BUILD_BUG_ON(MAX_SKB_FRAGS >= MAX_PENDING_REQS);
while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
!list_empty(&net_schedule_list)) {
/* Get a netif from the list with work to do. */
netif_idx_release(idx);
}
-irqreturn_t netif_be_int(int irq, void *dev_id)
+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
netif_t *netif = dev_id;
}
#ifdef NETBE_DEBUG_INTERRUPT
-static irqreturn_t netif_be_dbg(int irq, void *dev_id)
+static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
{
struct list_head *ent;
netif_t *netif;
return IRQ_HANDLED;
}
-
-static struct irqaction netif_be_dbg_action = {
- .handler = netif_be_dbg,
- .flags = IRQF_SHARED,
- .name = "net-be-dbg"
-};
#endif
static int __init netback_init(void)
netif_xenbus_init();
#ifdef NETBE_DEBUG_INTERRUPT
- (void)bind_virq_to_irqaction(VIRQ_DEBUG,
- 0,
- &netif_be_dbg_action);
+ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
+ 0,
+ netif_be_dbg,
+ SA_SHIRQ,
+ "net-be-dbg",
+ &netif_be_dbg);
#endif
return 0;
* and vif variables to the environment, for the benefit of the vif-* hotplug
* scripts.
*/
-static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env)
+static int netback_uevent(struct xenbus_device *xdev, char **envp,
+ int num_envp, char *buffer, int buffer_size)
{
struct backend_info *be = xdev->dev.driver_data;
netif_t *netif = be->netif;
+ int i = 0, length = 0;
char *val;
DPRINTK("netback_uevent");
return err;
}
else {
- add_uevent_var(env, "script=%s", val);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
+ &length, "script=%s", val);
kfree(val);
}
- add_uevent_var(env, "vif=%s", netif->dev->name);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "vif=%s", netif->dev->name);
+
+ envp[i] = NULL;
return 0;
}
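For reference, a minimal sketch of the pre-2.6.25 uevent calling convention this hunk restores: the core hands in an envp[]/buffer pair, add_uevent_var() threads the running index and length through its pointer arguments, and the handler must NULL-terminate envp itself (example_uevent and the "vif1.0" value are illustrative):

static int example_uevent(struct xenbus_device *xdev, char **envp,
			  int num_envp, char *buffer, int buffer_size)
{
	int i = 0, length = 0;

	/* Appends "vif=vif1.0" to the environment, bounds-checked against
	 * both num_envp and buffer_size. */
	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
		       "vif=%s", "vif1.0");
	envp[i] = NULL;	/* the handler, not the core, terminates the array */
	return 0;
}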
xenbus_dev_fatal(dev, err, "creating interface");
return;
}
+ SET_NETDEV_DEV(be->netif->dev, &dev->dev);
kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
}
static struct xenbus_driver netback = {
.name = "vif",
+ .owner = THIS_MODULE,
.ids = netback_ids,
.probe = netback_probe,
.remove = netback_remove,
void netif_xenbus_init(void)
{
- if (xenbus_register_backend(&netback))
- BUG();
+ xenbus_register_backend(&netback);
}
* IN THE SOFTWARE.
*/
-#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/list.h>
DPRINTK("%p\n",vif_state);
/* Make sure there are no data path operations going on */
- napi_disable(&vif_state->np->napi);
+ netif_poll_disable(vif_state->np->netdev);
netif_tx_lock_bh(vif_state->np->netdev);
vif_state->hooks = vif_state->np->accelerator->hooks;
netif_tx_unlock_bh(vif_state->np->netdev);
- napi_enable(&vif_state->np->napi);
+ netif_poll_enable(vif_state->np->netdev);
}
struct netfront_accel_vif_state *vif_state)
{
/* Make sure there are no data path operations going on */
- napi_disable(&vif_state->np->napi);
+ netif_poll_disable(vif_state->np->netdev);
netif_tx_lock_bh(vif_state->np->netdev);
/*
vif_state->hooks = NULL;
netif_tx_unlock_bh(vif_state->np->netdev);
- napi_enable(&vif_state->np->napi);
+ netif_poll_enable(vif_state->np->netdev);
}
{
return skb_is_gso(skb) &&
(!skb_gso_ok(skb, dev->features) ||
- unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
+ unlikely(skb->ip_summed != CHECKSUM_HW));
}
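A note on the checksum constants, since this substitution recurs throughout the patch: kernels before 2.6.19 had a single CHECKSUM_HW value that was later split by direction. Hedged summary:

/*
 *	old (<= 2.6.18)	new (>= 2.6.19)		meaning
 *	CHECKSUM_HW	CHECKSUM_PARTIAL	tx: hardware must fill in csum
 *	CHECKSUM_HW	CHECKSUM_COMPLETE	rx: hardware summed the payload
 *	CHECKSUM_NONE	CHECKSUM_NONE		no hardware assistance
 */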
#else
#define HAVE_GSO 0
static void network_alloc_rx_buffers(struct net_device *);
static void send_fake_arp(struct net_device *);
-static irqreturn_t netif_int(int irq, void *dev_id);
+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
err = bind_listening_port_to_irqhandler(
- dev->otherend_id, netif_int, IRQF_SAMPLE_RANDOM, netdev->name,
+ dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name,
netdev);
if (err < 0)
goto fail;
struct netfront_info *np = netdev_priv(dev);
memset(&np->stats, 0, sizeof(np->stats));
- napi_enable(&np->napi);
spin_lock_bh(&np->rx_lock);
if (netfront_carrier_ok(np)) {
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){
netfront_accelerator_call_stop_napi_irq(np, dev);
- netif_rx_schedule(dev, &np->napi);
+ netif_rx_schedule(dev);
}
}
spin_unlock_bh(&np->rx_lock);
- netif_start_queue(dev);
+ network_maybe_wake_tx(dev);
return 0;
}
netfront_accelerator_call_stop_napi_irq(np, dev);
- netif_rx_schedule(dev, &np->napi);
+ netif_rx_schedule(dev);
}
static void network_alloc_rx_buffers(struct net_device *dev)
tx->flags = 0;
extra = NULL;
- if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
+ if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
#ifdef CONFIG_XEN
if (skb->proto_data_valid) /* remote but checksummed? */
return 0;
}
-static irqreturn_t netif_int(int irq, void *dev_id)
+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
struct net_device *dev = dev_id;
struct netfront_info *np = netdev_priv(dev);
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) {
netfront_accelerator_call_stop_napi_irq(np, dev);
- netif_rx_schedule(dev, &np->napi);
+ netif_rx_schedule(dev);
dev->last_rx = jiffies;
}
}
#endif
}
-static int netif_poll(struct napi_struct *napi, int budget)
+static int netif_poll(struct net_device *dev, int *pbudget)
{
- struct netfront_info *np = container_of(napi, struct netfront_info, napi);
- struct net_device *dev = np->netdev;
+ struct netfront_info *np = netdev_priv(dev);
struct sk_buff *skb;
struct netfront_rx_info rinfo;
struct netif_rx_response *rx = &rinfo.rx;
struct netif_extra_info *extras = rinfo.extras;
RING_IDX i, rp;
struct multicall_entry *mcl;
- int work_done, more_to_do = 1, accel_more_to_do = 1;
+ int work_done, budget, more_to_do = 1, accel_more_to_do = 1;
struct sk_buff_head rxq;
struct sk_buff_head errq;
struct sk_buff_head tmpq;
skb_queue_head_init(&errq);
skb_queue_head_init(&tmpq);
+ if ((budget = *pbudget) > dev->quota)
+ budget = dev->quota;
rp = np->rx.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
}
}
- __skb_queue_purge(&errq);
+ while ((skb = __skb_dequeue(&errq)))
+ kfree_skb(skb);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
struct page *page = NETFRONT_SKB_CB(skb)->page;
accel_more_to_do = 0;
}
+ *pbudget -= work_done;
+ dev->quota -= work_done;
+
if (work_done < budget) {
local_irq_save(flags);
}
if (!more_to_do && !accel_more_to_do)
- __netif_rx_complete(dev, napi);
+ __netif_rx_complete(dev);
local_irq_restore(flags);
}
spin_unlock(&np->rx_lock);
- return work_done;
+ return more_to_do | accel_more_to_do;
}
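For reference, a hedged sketch (not from the patch) of the pre-2.6.24 ->poll() contract that netif_poll() is converted back to here, assuming the usual linux/netdevice.h definitions:

static int example_poll(struct net_device *dev, int *pbudget)
{
	int work_done = 0;
	int limit = min(*pbudget, dev->quota);

	/* ... receive at most 'limit' packets, counting them in work_done ... */

	*pbudget -= work_done;		/* charge the softirq-wide budget */
	dev->quota -= work_done;	/* and the per-device quota */
	if (work_done < limit) {
		netif_rx_complete(dev);	/* leave the poll list, re-enable irq */
		return 0;		/* done: do not poll again */
	}
	return 1;			/* more work: stay on the poll list */
}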
static void netif_release_tx_bufs(struct netfront_info *np)
}
}
- __skb_queue_purge(&free_list);
+ while ((skb = __skb_dequeue(&free_list)) != NULL)
+ dev_kfree_skb(skb);
spin_unlock_bh(&np->rx_lock);
}
{
struct netfront_info *np = netdev_priv(dev);
netif_stop_queue(np->netdev);
- napi_disable(&np->napi);
return 0;
}
};
#ifdef CONFIG_SYSFS
-static ssize_t show_rxbuf_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_rxbuf_min(struct class_device *cd, char *buf)
{
- struct netfront_info *info = netdev_priv(to_net_dev(dev));
+ struct net_device *netdev = container_of(cd, struct net_device,
+ class_dev);
+ struct netfront_info *info = netdev_priv(netdev);
return sprintf(buf, "%u\n", info->rx_min_target);
}
-static ssize_t store_rxbuf_min(struct device *dev,
- struct device_attribute *attr,
+static ssize_t store_rxbuf_min(struct class_device *cd,
const char *buf, size_t len)
{
- struct net_device *netdev = to_net_dev(dev);
+ struct net_device *netdev = container_of(cd, struct net_device,
+ class_dev);
struct netfront_info *np = netdev_priv(netdev);
char *endp;
unsigned long target;
return len;
}
-static ssize_t show_rxbuf_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_rxbuf_max(struct class_device *cd, char *buf)
{
- struct netfront_info *info = netdev_priv(to_net_dev(dev));
+ struct net_device *netdev = container_of(cd, struct net_device,
+ class_dev);
+ struct netfront_info *info = netdev_priv(netdev);
return sprintf(buf, "%u\n", info->rx_max_target);
}
-static ssize_t store_rxbuf_max(struct device *dev,
- struct device_attribute *attr,
+static ssize_t store_rxbuf_max(struct class_device *cd,
const char *buf, size_t len)
{
- struct net_device *netdev = to_net_dev(dev);
+ struct net_device *netdev = container_of(cd, struct net_device,
+ class_dev);
struct netfront_info *np = netdev_priv(netdev);
char *endp;
unsigned long target;
return len;
}
-static ssize_t show_rxbuf_cur(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf)
{
- struct netfront_info *info = netdev_priv(to_net_dev(dev));
+ struct net_device *netdev = container_of(cd, struct net_device,
+ class_dev);
+ struct netfront_info *info = netdev_priv(netdev);
return sprintf(buf, "%u\n", info->rx_target);
}
-static struct device_attribute xennet_attrs[] = {
+static const struct class_device_attribute xennet_attrs[] = {
__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
int error = 0;
for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
- error = device_create_file(&netdev->dev,
- &xennet_attrs[i]);
+ error = class_device_create_file(&netdev->class_dev,
+ &xennet_attrs[i]);
if (error)
goto fail;
}
fail:
while (--i >= 0)
- device_remove_file(&netdev->dev, &xennet_attrs[i]);
+ class_device_remove_file(&netdev->class_dev,
+ &xennet_attrs[i]);
return error;
}
{
int i;
- for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
- device_remove_file(&netdev->dev, &xennet_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
+ class_device_remove_file(&netdev->class_dev,
+ &xennet_attrs[i]);
+ }
}
#endif /* CONFIG_SYSFS */
netdev->hard_start_xmit = network_start_xmit;
netdev->stop = network_close;
netdev->get_stats = network_get_stats;
- netif_napi_add(netdev, &np->napi, netif_poll, 64);
+ netdev->poll = netif_poll;
netdev->set_multicast_list = network_set_multicast_list;
netdev->uninit = netif_uninit;
netdev->set_mac_address = xennet_set_mac_address;
netdev->change_mtu = xennet_change_mtu;
+ netdev->weight = 64;
netdev->features = NETIF_F_IP_CSUM;
SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
+ SET_MODULE_OWNER(netdev);
SET_NETDEV_DEV(netdev, &dev->dev);
np->netdev = netdev;
static struct xenbus_driver netfront_driver = {
.name = "vif",
+ .owner = THIS_MODULE,
.ids = netfront_ids,
.probe = netfront_probe,
.remove = __devexit_p(netfront_remove),
spinlock_t tx_lock;
spinlock_t rx_lock;
- struct napi_struct napi;
-
unsigned int irq;
unsigned int copying_receiver;
unsigned int carrier;
pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
-ccflags-$(CONFIG_XEN_PCIDEV_BE_DEBUG) += -DDEBUG
+ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
#include "conf_space.h"
#include "conf_space_quirks.h"
+static int permissive;
+module_param(permissive, bool, 0644);
+
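Usage note for the module-wide knob above (hedged; the paths follow from the 0644 parameter mode):

/* Either at load time,
 *	modprobe pciback permissive=1
 * or at runtime via the sysfs node,
 *	echo 1 > /sys/module/pciback/parameters/permissive
 * This relaxes the config-space write policy for every exported device, on
 * top of the per-device dev_data->permissive flag checked below. */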
#define DEFINE_PCI_CONFIG(op,size,type) \
int pciback_##op##_config_##size \
(struct pci_dev *dev, int offset, type value, void *data) \
* This means that some fields may still be read-only because
* they have entries in the config_field list that intercept
* the write and do nothing. */
- if (dev_data->permissive) {
+ if (dev_data->permissive || permissive) {
switch (size) {
case 1:
err = pci_write_config_byte(dev, offset,
{
int err;
- if (!atomic_read(&dev->enable_cnt) && is_enable_cmd(value)) {
+ if (!dev->is_enabled && is_enable_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: enable\n",
pci_name(dev));
err = pci_enable_device(dev);
if (err)
return err;
- } else if (atomic_read(&dev->enable_cnt) && !is_enable_cmd(value)) {
+ } else if (dev->is_enabled && !is_enable_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: disable\n",
pci_name(dev));
kfree(dev_data);
pdev->pci_dev_data = NULL;
}
+
+int pciback_get_pcifront_dev(struct pci_dev *pcidev,
+ struct pciback_device *pdev,
+ unsigned int *domain, unsigned int *bus, unsigned int *devfn)
+{
+ struct controller_dev_data *dev_data = pdev->pci_dev_data;
+ struct controller_dev_entry *dev_entry;
+ struct controller_list_entry *cntrl_entry;
+ unsigned long flags;
+ int found = 0;
+ spin_lock_irqsave(&dev_data->lock, flags);
+
+ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
+ list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
+ if ((dev_entry->dev->bus->number ==
+ pcidev->bus->number) &&
+ (dev_entry->dev->devfn ==
+ pcidev->devfn) &&
+ (pci_domain_nr(dev_entry->dev->bus) ==
+ pci_domain_nr(pcidev->bus))) {
+ found = 1;
+ *domain = cntrl_entry->domain;
+ *bus = cntrl_entry->bus;
+ *devfn = dev_entry->devfn;
+ goto out;
+ }
+ }
+ }
+out:
+ spin_unlock_irqrestore(&dev_data->lock, flags);
+ return found;
+}
+
kfree(dev_data);
pdev->pci_dev_data = NULL;
}
+
+int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
+ unsigned int *domain, unsigned int *bus, unsigned int *devfn)
+
+{
+ *domain = pci_domain_nr(pcidev->bus);
+ *bus = pcidev->bus->number;
+ *devfn = pcidev->devfn;
+ return 1;
+}
*/
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
#include <asm/atomic.h>
+#include <xen/evtchn.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_quirks.h"
static char *pci_devs_to_hide = NULL;
+wait_queue_head_t aer_wait_queue;
+/* Add a rw-semaphore to synchronize AER handling with pciback
+ * remove/reconfigure ops: we must not have a pciback device removed in the
+ * middle of an AER operation.
+ */
+static DECLARE_RWSEM(pcistub_sem);
module_param_named(hide, pci_devs_to_hide, charp, 0444);
struct pcistub_device_id {
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ /* Hold this lock to avoid breaking the link between pcistub and
+ * pciback while AER processing is in progress.
+ */
+ down_write(&pcistub_sem);
/* Cleanup our device
* (so it's ready for the next domain)
*/
spin_unlock_irqrestore(&found_psdev->lock, flags);
pcistub_device_put(found_psdev);
+ up_write(&pcistub_sem);
}
static int __devinit pcistub_match_one(struct pci_dev *dev,
pci_set_drvdata(dev, dev_data);
dev_dbg(&dev->dev, "initializing config\n");
+
+ init_waitqueue_head(&aer_wait_queue);
err = pciback_config_init_dev(dev);
if (err)
goto out;
dev_info(&dev->dev, "seizing device\n");
err = pcistub_seize(dev);
+#ifdef CONFIG_PCI_GUESTDEV
+ } else if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
+ if (!pci_is_guestdev(dev)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ dev_info(&dev->dev, "seizing device\n");
+ err = pcistub_seize(dev);
+#endif /* CONFIG_PCI_GUESTDEV */
} else
/* Didn't find the device */
err = -ENODEV;
{0,},
};
+static void kill_domain_by_device(struct pcistub_device *psdev)
+{
+ struct xenbus_transaction xbt;
+ int err;
+ char nodename[1024];
+
+ if (!psdev) {
+ printk(KERN_ERR "pciback: device is NULL when doing "
+ "AER recovery/kill_domain\n");
+ return;
+ }
+ sprintf(nodename, "/local/domain/0/backend/pci/%d/0",
+ psdev->pdev->xdev->otherend_id);
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ dev_err(&psdev->dev->dev,
+ "error %d when starting xenbus transaction\n", err);
+ return;
+ }
+ /* PV AER handlers will set this flag */
+ xenbus_printf(xbt, nodename, "aerState", "aerfail");
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ dev_err(&psdev->dev->dev,
+ "error %d when ending xenbus transaction\n", err);
+ return;
+ }
+}
+
+/* For each AER recovery step (error_detected, mmio_enabled, etc.), the
+ * frontend and the backend have to cooperate. In pciback these steps all do
+ * the same job: send a service request and wait for the frontend's response.
+ */
+static pci_ers_result_t common_process(struct pcistub_device *psdev,
+ pci_channel_state_t state, int aer_cmd, pci_ers_result_t result)
+{
+ pci_ers_result_t res = result;
+ struct xen_pcie_aer_op *aer_op;
+ int ret;
+
+ /* with PV AER drivers */
+ aer_op = &(psdev->pdev->sh_info->aer_op);
+ aer_op->cmd = aer_cmd;
+ /* useful for the error_detected callback */
+ aer_op->err = state;
+ /* pcifront BDF (bus/device/function) */
+ ret = pciback_get_pcifront_dev(psdev->dev, psdev->pdev,
+ &aer_op->domain, &aer_op->bus, &aer_op->devfn);
+ if (!ret) {
+ dev_err(&psdev->dev->dev,
+ "pciback: failed to get pcifront device\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+ wmb();
+
+ dev_dbg(&psdev->dev->dev,
+ "pciback: aer_op %x dom %x bus %x devfn %x\n",
+ aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
+ /* Local flag marking that an AER request is outstanding; the pciback
+ * event callback uses it to decide whether to check for the frontend's
+ * AER service ack signal.
+ */
+ set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+
+ /* A pcifront conf_read_write request may invoke the callback and cause
+ * a spurious wake_up. That is harmless, and cheaper than taking a
+ * spinlock here.
+ */
+ set_bit(_XEN_PCIB_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags);
+ wmb();
+ notify_remote_via_irq(psdev->pdev->evtchn_irq);
+
+ ret = wait_event_timeout(aer_wait_queue, !(test_bit(_XEN_PCIB_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags)), 300*HZ);
+
+ if (!ret) {
+ if (test_bit(_XEN_PCIB_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&psdev->dev->dev,
+ "pcifront aer process not responding!\n");
+ clear_bit(_XEN_PCIB_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags);
+ aer_op->err = PCI_ERS_RESULT_NONE;
+ return res;
+ }
+ }
+ clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+
+ if (test_bit(_XEN_PCIF_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_dbg(&psdev->dev->dev,
+ "schedule pci_conf service in pciback\n");
+ test_and_schedule_op(psdev->pdev);
+ }
+
+ res = (pci_ers_result_t)aer_op->err;
+ return res;
+}
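A hedged summary of the handshake common_process() implements (flag names are from the shared info page; the frontend's half is inferred from this side):

/*
 * 1. backend fills sh_info->aer_op, sets _XEN_PCIB_active and kicks the
 *    shared event channel (notify_remote_via_irq);
 * 2. pcifront services the op, clears _XEN_PCIB_active and kicks back;
 * 3. the backend event handler sees _PCIB_op_pending set and wakes
 *    aer_wait_queue, ending the wait_event_timeout() (300*HZ cap).
 *
 * _PCIB_op_pending is backend-local bookkeeping only; it never crosses
 * the shared page.
 */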
+
+/*
+ * pciback_slot_reset: send the slot_reset request to pcifront, in case the
+ * device driver provides this service, and then wait for the pcifront ack.
+ * @dev: pointer to the PCI device
+ * The return value is used by the aer_core do_recovery policy.
+ */
+static pci_ers_result_t pciback_slot_reset(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+ pci_ers_result_t result;
+
+ result = PCI_ERS_RESULT_RECOVERED;
+ dev_dbg(&dev->dev, "pciback_slot_reset(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+ if (!psdev || !psdev->pdev || !psdev->pdev->sh_info) {
+ dev_err(&dev->dev,
+ "pciback device is not found/in use/connected!\n");
+ goto end;
+ }
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&dev->dev,
+ "guest with no AER driver should have been killed\n");
+ goto release;
+ }
+ result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
+
+ if (result == PCI_ERS_RESULT_NONE ||
+ result == PCI_ERS_RESULT_DISCONNECT) {
+ dev_dbg(&dev->dev,
+ "No AER slot_reset service or disconnected!\n");
+ kill_domain_by_device(psdev);
+ }
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return result;
+}
+
+/*
+ * pciback_mmio_enabled: send the mmio_enabled request to pcifront, in case
+ * the device driver provides this service, and then wait for the pcifront
+ * ack.
+ * @dev: pointer to the PCI device
+ * The return value is used by the aer_core do_recovery policy.
+ */
+static pci_ers_result_t pciback_mmio_enabled(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+ pci_ers_result_t result;
+
+ result = PCI_ERS_RESULT_RECOVERED;
+ dev_dbg(&dev->dev, "pciback_mmio_enabled(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+ if (!psdev || !psdev->pdev || !psdev->pdev->sh_info) {
+ dev_err(&dev->dev,
+ "pciback device is not found/in use/connected!\n");
+ goto end;
+ }
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&dev->dev,
+ "guest with no AER driver should have been killed\n");
+ goto release;
+ }
+ result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
+
+ if (result == PCI_ERS_RESULT_NONE ||
+ result == PCI_ERS_RESULT_DISCONNECT) {
+ dev_dbg(&dev->dev,
+ "No AER mmio_enabled service or disconnected!\n");
+ kill_domain_by_device(psdev);
+ }
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return result;
+}
+
+/*
+ * pciback_error_detected: send the error_detected request to pcifront, in
+ * case the device driver provides this service, and then wait for the
+ * pcifront ack.
+ * @dev: pointer to the PCI device
+ * @error: the current PCI connection state
+ * The return value is used by the aer_core do_recovery policy.
+ */
+
+static pci_ers_result_t pciback_error_detected(struct pci_dev *dev,
+ pci_channel_state_t error)
+{
+ struct pcistub_device *psdev;
+ pci_ers_result_t result;
+
+ result = PCI_ERS_RESULT_CAN_RECOVER;
+ dev_dbg(&dev->dev, "pciback_error_detected(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+ if (!psdev || !psdev->pdev || !psdev->pdev->sh_info) {
+ dev_err(&dev->dev,
+ "pciback device is not found/in use/connected!\n");
+ goto end;
+ }
+ /* Guest owns the device yet no AER handler registered: kill the guest. */
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+ result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);
+
+ if (result == PCI_ERS_RESULT_NONE ||
+ result == PCI_ERS_RESULT_DISCONNECT) {
+ dev_dbg(&dev->dev,
+ "No AER error_detected service or disconnected!\n");
+ kill_domain_by_device(psdev);
+ }
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return result;
+}
+
+/*
+ * pciback_error_resume: send the error_resume request to pcifront, in case
+ * the device driver provides this service, and then wait for the pcifront
+ * ack.
+ * @dev: pointer to the PCI device
+ */
+
+static void pciback_error_resume(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+
+ dev_dbg(&dev->dev, "pciback_error_resume(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+ if (!psdev || !psdev->pdev || !psdev->pdev->sh_info) {
+ dev_err(&dev->dev,
+ "pciback device is not found/in use/connected!\n");
+ goto end;
+ }
+
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&dev->dev,
+ "guest with no AER driver should have been killed\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+ common_process(psdev, 1, XEN_PCI_OP_aer_resume, PCI_ERS_RESULT_RECOVERED);
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return;
+}
+
+/*add pciback AER handling*/
+static struct pci_error_handlers pciback_error_handler = {
+ .error_detected = pciback_error_detected,
+ .mmio_enabled = pciback_mmio_enabled,
+ .slot_reset = pciback_slot_reset,
+ .resume = pciback_error_resume,
+};
+
/*
* Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
* for a normal device. I don't want it to be loaded automatically.
.id_table = pcistub_ids,
.probe = pcistub_probe,
.remove = pcistub_remove,
+ .err_handler = &pciback_error_handler,
};
static inline int str_to_slot(const char *buf, int *domain, int *bus,
#define _PDEVF_op_active (0)
#define PDEVF_op_active (1<<(_PDEVF_op_active))
+#define _PCIB_op_pending (1)
+#define PCIB_op_pending (1<<(_PCIB_op_pending))
struct pciback_device {
void *pci_dev_data;
struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn);
+
+/*
+ * Added for domain0 PCIe AER handling. Get the guest domain/bus/devfn in
+ * pciback before sending an AER request to pcifront, so that the guest can
+ * identify the device and cooperate with pciback to finish the AER recovery
+ * job, if the device driver has the capability.
+ */
+
+int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
+ unsigned int *domain, unsigned int *bus, unsigned int *devfn);
int pciback_init_devices(struct pciback_device *pdev);
int pciback_publish_pci_roots(struct pciback_device *pdev,
publish_pci_root_cb cb);
void pciback_release_devices(struct pciback_device *pdev);
/* Handles events from front-end */
-irqreturn_t pciback_handle_event(int irq, void *dev_id);
-void pciback_do_op(struct work_struct *work);
+irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
+void pciback_do_op(void *data);
int pciback_xenbus_register(void);
void pciback_xenbus_unregister(void);
struct pci_dev *dev, struct xen_pci_op *op);
#endif
extern int verbose_request;
+
+void test_and_schedule_op(struct pciback_device *pdev);
#endif
+
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/module.h>
+#include <linux/wait.h>
#include <asm/bitops.h>
#include <xen/evtchn.h>
#include "pciback.h"
pci_write_config_word(dev, PCI_COMMAND, 0);
- atomic_set(&dev->enable_cnt, 0);
+ dev->is_enabled = 0;
dev->is_busmaster = 0;
} else {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
}
}
}
-
-static inline void test_and_schedule_op(struct pciback_device *pdev)
+extern wait_queue_head_t aer_wait_queue;
+extern struct workqueue_struct *pciback_wq;
+/*
+ * The same evtchn is now used both for pcifront conf_read_write requests
+ * and for the PCIe AER frontend ack. We use a dedicated workqueue to run
+ * the pciback conf_read_write service, to avoid conflicts with the
+ * aer_core do_recovery job, which also uses the system default workqueue.
+ */
+void test_and_schedule_op(struct pciback_device *pdev)
{
/* Check that frontend is requesting an operation and that we are not
* already processing a request */
if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
&& !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
- schedule_work(&pdev->op_work);
+ queue_work(pciback_wq, &pdev->op_work);
+
+ /* _XEN_PCIB_active should have been cleared by pcifront. Also make
+ * sure pciback is really waiting for the ack by checking
+ * _PCIB_op_pending. */
+ if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
+ && test_bit(_PCIB_op_pending, &pdev->flags)) {
+ wake_up(&aer_wait_queue);
+ }
}
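For reference, a minimal sketch of the dedicated-queue lifecycle this patch adopts, using the old (pre-2.6.20) three-argument INIT_WORK; example_wq/example_init are illustrative names:

static struct workqueue_struct *example_wq;

static int example_init(struct pciback_device *pdev)
{
	example_wq = create_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	/* Old 3-argument form: the callback receives 'pdev' back as data. */
	INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
	queue_work(example_wq, &pdev->op_work);	/* not schedule_work() */
	return 0;
}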
/* Performing the configuration space reads/writes must not be done in atomic
* context because some of the pci_* functions can sleep (mostly due to ACPI
* use of semaphores). This function is intended to be called from a work
* queue in process context taking a struct pciback_device as a parameter */
-void pciback_do_op(struct work_struct *work)
+void pciback_do_op(void *data)
{
- struct pciback_device *pdev = container_of(work, struct pciback_device, op_work);
+ struct pciback_device *pdev = data;
struct pci_dev *dev;
struct xen_pci_op *op = &pdev->sh_info->op;
smp_mb__after_clear_bit(); /* /before/ final check for work */
/* Check to see if the driver domain tried to start another request in
- * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. */
+ * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
+ */
test_and_schedule_op(pdev);
}
-irqreturn_t pciback_handle_event(int irq, void *dev_id)
+irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
{
struct pciback_device *pdev = dev_id;
kfree(slot_dev);
pdev->pci_dev_data = NULL;
}
+
+int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
+ unsigned int *domain, unsigned int *bus, unsigned int *devfn)
+{
+ int slot, busnr;
+ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
+ struct pci_dev *dev;
+ int found = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&slot_dev->lock, flags);
+
+ for (busnr = 0; busnr < PCI_BUS_NBR; busnr++)
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ dev = slot_dev->slots[busnr][slot];
+ if (dev && dev->bus->number == pcidev->bus->number
+ && dev->devfn == pcidev->devfn
+ && pci_domain_nr(dev->bus) == pci_domain_nr(pcidev->bus)) {
+ found = 1;
+ *domain = 0;
+ *bus = busnr;
+ *devfn = PCI_DEVFN(slot,0);
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&slot_dev->lock, flags);
+ return found;
+}
kfree(vpci_dev);
pdev->pci_dev_data = NULL;
}
+
+int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
+ unsigned int *domain, unsigned int *bus, unsigned int *devfn)
+{
+ struct pci_dev_entry *entry;
+ struct pci_dev *dev = NULL;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ unsigned long flags;
+ int found = 0, slot;
+
+ spin_lock_irqsave(&vpci_dev->lock, flags);
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ list_for_each_entry(entry,
+ &vpci_dev->dev_list[slot],
+ list) {
+ dev = entry->dev;
+ if (dev && dev->bus->number == pcidev->bus->number
+ && pci_domain_nr(dev->bus) == pci_domain_nr(pcidev->bus)
+ && dev->devfn == pcidev->devfn) {
+ found = 1;
+ *domain = 0;
+ *bus = 0;
+ *devfn = PCI_DEVFN(slot, PCI_FUNC(pcidev->devfn));
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ return found;
+}
#include "pciback.h"
#define INVALID_EVTCHN_IRQ (-1)
+struct workqueue_struct *pciback_wq;
static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
{
pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
pdev->be_watching = 0;
- INIT_WORK(&pdev->op_work, pciback_do_op);
+ INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
if (pciback_init_devices(pdev)) {
kfree(pdev);
/* If the driver domain started an op, make sure we complete it or
* delete it before releasing the shared memory */
+ cancel_delayed_work(&pdev->op_work);
- flush_scheduled_work();
+ flush_workqueue(pciback_wq);
if (pdev->sh_info != NULL) {
err = bind_interdomain_evtchn_to_irqhandler(
pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
- IRQF_SAMPLE_RANDOM, "pciback", pdev);
+ SA_SAMPLE_RANDOM, "pciback", pdev);
if (err < 0) {
xenbus_dev_fatal(pdev->xdev, err,
"Error binding event channel to IRQ");
static struct xenbus_driver xenbus_pciback_driver = {
.name = "pciback",
+ .owner = THIS_MODULE,
.ids = xenpci_ids,
.probe = pciback_xenbus_probe,
.remove = pciback_xenbus_remove,
{
if (!is_running_on_xen())
return -ENODEV;
-
+ pciback_wq = create_workqueue("pciback_workqueue");
+ if (!pciback_wq) {
+ printk(KERN_ERR "pciback_xenbus_register: creating "
+ "pciback_workqueue failed\n");
+ return -ENOMEM;
+ }
return xenbus_register_backend(&xenbus_pciback_driver);
}
void __exit pciback_xenbus_unregister(void)
{
+ destroy_workqueue(pciback_wq);
xenbus_unregister_driver(&xenbus_pciback_driver);
}
* Authors: Jimi Xenidis <jimix@watson.ibm.com>
*/
+#include <linux/config.h>
#include <linux/compat.h>
#include <linux/ioctl.h>
#include <linux/syscalls.h>
static long privcmd_ioctl(struct file *file,
unsigned int cmd, unsigned long data)
{
- long ret = -ENOSYS;
+ int ret = -ENOSYS;
void __user *udata = (void __user *) data;
switch (cmd) {
if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
return -EFAULT;
-#ifdef CONFIG_X86
+#if defined(__i386__)
if (hypercall.op >= (PAGE_SIZE >> 5))
break;
- ret = _hypercall(long, (unsigned int)hypercall.op,
- (unsigned long)hypercall.arg[0],
- (unsigned long)hypercall.arg[1],
- (unsigned long)hypercall.arg[2],
- (unsigned long)hypercall.arg[3],
- (unsigned long)hypercall.arg[4]);
+ __asm__ __volatile__ (
+ "pushl %%ebx; pushl %%ecx; pushl %%edx; "
+ "pushl %%esi; pushl %%edi; "
+ "movl 8(%%eax),%%ebx ;"
+ "movl 16(%%eax),%%ecx ;"
+ "movl 24(%%eax),%%edx ;"
+ "movl 32(%%eax),%%esi ;"
+ "movl 40(%%eax),%%edi ;"
+ "movl (%%eax),%%eax ;"
+ "shll $5,%%eax ;"
+ "addl $hypercall_page,%%eax ;"
+ "call *%%eax ;"
+ "popl %%edi; popl %%esi; popl %%edx; "
+ "popl %%ecx; popl %%ebx"
+ : "=a" (ret) : "0" (&hypercall) : "memory" );
+#elif defined (__x86_64__)
+ if (hypercall.op < (PAGE_SIZE >> 5)) {
+ long ign1, ign2, ign3;
+ __asm__ __volatile__ (
+ "movq %8,%%r10; movq %9,%%r8;"
+ "shll $5,%%eax ;"
+ "addq $hypercall_page,%%rax ;"
+ "call *%%rax"
+ : "=a" (ret), "=D" (ign1),
+ "=S" (ign2), "=d" (ign3)
+ : "0" ((unsigned int)hypercall.op),
+ "1" (hypercall.arg[0]),
+ "2" (hypercall.arg[1]),
+ "3" (hypercall.arg[2]),
+ "g" (hypercall.arg[3]),
+ "g" (hypercall.arg[4])
+ : "r8", "r10", "memory" );
+ }
#else
ret = privcmd_hypercall(&hypercall);
#endif
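Both inline-asm branches above encode the same dispatch rule, worth spelling out once; a hedged sketch (hypercall_page is the real symbol, the macro name is made up):

/* The hypervisor fills one 4 KiB page with 32-byte call stubs, one per
 * hypercall number, so the entry point for hypercall 'op' is: */
#define EXAMPLE_HYPERCALL_ENTRY(op) \
	((void *)((char *)hypercall_page + ((op) << 5)))
/* hence the guard: op must stay below PAGE_SIZE >> 5 == 128. */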
}
#ifndef HAVE_ARCH_PRIVCMD_MMAP
-static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static struct page *privcmd_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int *type)
{
- return VM_FAULT_SIGBUS;
+ return NOPAGE_SIGBUS;
}
static struct vm_operations_struct privcmd_vm_ops = {
- .fault = privcmd_fault
+ .nopage = privcmd_nopage
};
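A brief, hedged note on the older API restored here:

/* ->nopage returns a struct page * (or the NOPAGE_SIGBUS/NOPAGE_OOM
 * sentinels) instead of the VM_FAULT_* codes of ->fault, and reports the
 * fault kind through the optional 'type' out-parameter rather than a
 * struct vm_fault. */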
static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
return -ENOSYS;
/* DONTCOPY is essential for Xen as copy_page_range is broken. */
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTCOPY;
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
vma->vm_ops = &privcmd_vm_ops;
vma->vm_private_data = NULL;
#ifndef NETBACK_ACCEL_H
#define NETBACK_ACCEL_H
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
unsigned long flags;
cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
- DECLARE_MAC_BUF(buf);
BUG_ON(fwd_priv == NULL);
- DPRINTK("Adding mac %s\n", print_mac(buf, mac));
+ DPRINTK("Adding mac " MAC_FMT "\n", MAC_ARG(mac));
spin_lock_irqsave(&fwd_set->fwd_lock, flags);
if (cuckoo_hash_lookup(&fwd_set->fwd_hash_table,
(cuckoo_hash_key *)(&key), &rc) != 0) {
spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
- EPRINTK("MAC address %s already accelerated.\n",
- print_mac(buf, mac));
+ EPRINTK("MAC address " MAC_FMT " already accelerated.\n",
+ MAC_ARG(mac));
return -EEXIST;
}
unsigned long flags;
cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
- DECLARE_MAC_BUF(buf);
- DPRINTK("Removing mac %s\n", print_mac(buf, mac));
+ DPRINTK("Removing mac " MAC_FMT "\n", MAC_ARG(mac));
BUG_ON(fwd_priv == NULL);
static inline int packet_is_arp_reply(struct sk_buff *skb)
{
return skb->protocol == ntohs(ETH_P_ARP)
- && arp_hdr(skb)->ar_op == ntohs(ARPOP_REPLY);
+ && skb->nh.arph->ar_op == ntohs(ARPOP_REPLY);
}
BUG_ON(fwd_priv == NULL);
- if (is_broadcast_ether_addr(skb_mac_header(skb))
- && packet_is_arp_reply(skb)) {
- DECLARE_MAC_BUF(buf);
-
+ if (is_broadcast_ether_addr(skb->mac.raw) && packet_is_arp_reply(skb)) {
/*
* update our fast path forwarding to reflect this
* gratuitous ARP
*/
- mac = skb_mac_header(skb)+ETH_ALEN;
+ mac = skb->mac.raw+ETH_ALEN;
- DPRINTK("%s: found gratuitous ARP for %s\n",
- __FUNCTION__, print_mac(buf, mac));
+ DPRINTK("%s: found gratuitous ARP for " MAC_FMT "\n",
+ __FUNCTION__, MAC_ARG(mac));
spin_lock_irqsave(&fwd_set->fwd_lock, flags);
/*
{
unsigned long lock_state;
struct net_accel_msg *msg;
- DECLARE_MAC_BUF(buf);
BUG_ON(bend == NULL || mac == NULL);
- VPRINTK("Sending local mac message: %s\n", print_mac(buf, mac));
+ VPRINTK("Sending local mac message: " MAC_FMT "\n",
+ MAC_ARG((const char *)mac));
msg = net_accel_msg_start_send(bend->shared_page, &bend->to_domU,
&lock_state);
BUG_ON(port == NULL);
NETBACK_ACCEL_STATS_OP(global_stats.dl_tx_packets++);
- if (skb_mac_header_was_set(skb))
+ if (skb->mac.raw != NULL)
netback_accel_tx_packet(skb, port->fwd_priv);
else {
DPRINTK("Ignoring packet with missing mac address\n");
int i;
for (i = 0; i < EFHW_MAX_NR_DEVS; i++) {
- struct efhw_nic *nic = efrm_nic_tablep->nic[i];
+ struct efhw_nic *nic = efrm_nic_table.nic[i];
/*
* It's possible for the nic structure to have not
/* Demultiplex a message IRQ from the frontend driver. */
-static irqreturn_t msgirq_from_frontend(int irq, void *context)
+static irqreturn_t msgirq_from_frontend(int irq, void *context,
+ struct pt_regs *unused)
{
struct xenbus_device *dev = context;
struct netback_accel *bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);
* functionally, but we need it to pass to the bind function, and may
* get called spuriously
*/
-static irqreturn_t netirq_from_frontend(int irq, void *context)
+static irqreturn_t netirq_from_frontend(int irq, void *context,
+ struct pt_regs *unused)
{
VPRINTK("netirq %d from device %s\n", irq,
((struct xenbus_device *)context)->nodename);
};
/* Resource driver structures used by other drivers as well */
-extern struct efrm_nic_table *efrm_nic_tablep;
+extern struct efrm_nic_table efrm_nic_table;
static inline void efrm_nic_table_hold(void)
{
- atomic_inc(&efrm_nic_tablep->ref_count);
+ atomic_inc(&efrm_nic_table.ref_count);
}
static inline void efrm_nic_table_rele(void)
{
- atomic_dec(&efrm_nic_tablep->ref_count);
+ atomic_dec(&efrm_nic_table.ref_count);
}
static inline int efrm_nic_table_held(void)
{
- return (atomic_read(&efrm_nic_tablep->ref_count) != 0);
+ return (atomic_read(&efrm_nic_table.ref_count) != 0);
}
/* Run code block _x multiple times with variable nic set to each
for ((_nic_i) = (efrm_nic_table_hold(), 0); \
(_nic_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
(_nic_i)++) \
- if (((_nic) = efrm_nic_tablep->nic[_nic_i]))
+ if (((_nic) = efrm_nic_table.nic[_nic_i]))
#define EFRM_FOR_EACH_NIC_IN_SET(_set, _i, _nic) \
for ((_i) = (efrm_nic_table_hold(), 0); \
(_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
++(_i)) \
- if (((_nic) = efrm_nic_tablep->nic[_i]) && \
+ if (((_nic) = efrm_nic_table.nic[_i]) && \
efrm_nic_set_read((_set), (_i)))
#endif /* __CI_EFRM_NIC_TABLE_H__ */
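The #define line of the first iterator is not visible in this excerpt; assuming it is EFRM_FOR_EACH_NIC(_nic_i, _nic), matching the sfc resource driver's naming, a hypothetical walk looks like:

static int example_count_nics(void)
{
	struct efhw_nic *nic;
	int i, count = 0;

	EFRM_FOR_EACH_NIC(i, nic)
		count++;	/* body runs only for non-NULL table slots */
	return count;		/* hold/rele bracket the whole walk */
}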
#include <xen/evtchn.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/list.h>
enum netfront_accel_post_status {
u32 ip, u16 port, u8 protocol);
/* Process an IRQ received from back end driver */
-irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context);
-irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context);
+irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context,
+ struct pt_regs *unused);
+irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context,
+ struct pt_regs *unused);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
extern void netfront_accel_msg_from_bend(struct work_struct *context);
/* Prime our interrupt */
spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
if (!netfront_accel_vi_enable_interrupts(vnic)) {
- struct netfront_info *np = netdev_priv(vnic->net_dev);
-
/* Cripes, that was quick, better pass it up */
netfront_accel_disable_net_interrupts(vnic);
vnic->irq_enabled = 0;
NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_schedule_count++);
- netif_rx_schedule(vnic->net_dev, &np->napi);
+ netif_rx_schedule(vnic->net_dev);
} else {
/*
* Nothing yet, make sure we get interrupts through
static void vnic_start_fastpath(netfront_accel_vnic *vnic)
{
struct net_device *net_dev = vnic->net_dev;
- struct netfront_info *np = netdev_priv(net_dev);
unsigned long flags;
DPRINTK("%s\n", __FUNCTION__);
vnic->tx_enabled = 1;
spin_unlock_irqrestore(&vnic->tx_lock, flags);
- napi_disable(&np->napi);
+ netif_poll_disable(net_dev);
vnic->poll_enabled = 1;
- napi_enable(&np->napi);
+ netif_poll_enable(net_dev);
vnic_start_interrupts(vnic);
}
spin_unlock_irqrestore(&vnic->tx_lock, flags1);
/* Must prevent polls and hold lock to modify poll_enabled */
- napi_disable(&np->napi);
+ netif_poll_disable(net_dev);
spin_lock_irqsave(&vnic->irq_enabled_lock, flags1);
vnic->poll_enabled = 0;
spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags1);
- napi_enable(&np->napi);
+ netif_poll_enable(net_dev);
}
cuckoo_hash_mac_key key;
if (msg->u.localmac.flags & NET_ACCEL_MSG_ADD) {
- DECLARE_MAC_BUF(buf);
-
- DPRINTK("MAC has moved, could be local: %s\n",
- print_mac(buf, msg->u.localmac.mac));
+ DPRINTK("MAC has moved, could be local: " MAC_FMT "\n",
+ MAC_ARG(msg->u.localmac.mac));
key = cuckoo_mac_to_key(msg->u.localmac.mac);
spin_lock_irqsave(&vnic->table_lock, flags);
/* Try to remove it, not a big deal if not there */
}
-irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context)
+irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context,
+ struct pt_regs *unused)
{
netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
VPRINTK("irq %d from device %s\n", irq, vnic->dev->nodename);
}
/* Process an interrupt received from the NIC via backend */
-irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context)
+irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context,
+ struct pt_regs *unused)
{
netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
struct net_device *net_dev = vnic->net_dev;
spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
if (vnic->irq_enabled) {
- struct netfront_info *np = netdev_priv(net_dev);
-
netfront_accel_disable_net_interrupts(vnic);
vnic->irq_enabled = 0;
spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
vnic->stats.event_count_since_irq;
vnic->stats.event_count_since_irq = 0;
#endif
- netif_rx_schedule(net_dev, &np->napi);
+ netif_rx_schedule(net_dev);
}
else {
spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
#include "accel_tso.h"
-#define ETH_HDR_LEN(skb) skb_network_offset(skb)
-#define SKB_TCP_OFF(skb) skb_transport_offset(skb)
-#define SKB_IP_OFF(skb) skb_network_offset(skb)
+#define PTR_DIFF(p1, p2) ((u8*)(p1) - (u8*)(p2))
+#define ETH_HDR_LEN(skb) ((skb)->nh.raw - (skb)->data)
+#define SKB_TCP_OFF(skb) PTR_DIFF((skb)->h.th, (skb)->data)
+#define SKB_IP_OFF(skb) PTR_DIFF((skb)->nh.iph, (skb)->data)
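For reference, the old (pre-2.6.22) sk_buff keeps raw header pointers rather than offsets, which is what the macros above walk:

/*
 *   skb->data --> | ethernet | ip | tcp | payload |
 *                 ^          ^    ^
 *          skb->mac.raw  nh.raw  h.raw   (unions: nh.iph, h.th, ...)
 *
 * so ETH_HDR_LEN() is the distance from data to the IP header, and
 * SKB_IP_OFF()/SKB_TCP_OFF() are the distances from data to those headers.
 */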
/*
* Set a maximum number of buffers in each output packet to make life
static inline void tso_check_safe(struct sk_buff *skb) {
EPRINTK_ON(skb->protocol != htons (ETH_P_IP));
EPRINTK_ON(((struct ethhdr*) skb->data)->h_proto != htons (ETH_P_IP));
- EPRINTK_ON(ip_hdr(skb)->protocol != IPPROTO_TCP);
- EPRINTK_ON((SKB_TCP_OFF(skb) + tcp_hdrlen(skb)) > skb_headlen(skb));
+ EPRINTK_ON(skb->nh.iph->protocol != IPPROTO_TCP);
+ EPRINTK_ON((SKB_TCP_OFF(skb)
+ + (skb->h.th->doff << 2u)) > skb_headlen(skb));
}
* All ethernet/IP/TCP headers combined size is TCP header size
* plus offset of TCP header relative to start of packet.
*/
- st->p.header_length = tcp_hdrlen(skb) + SKB_TCP_OFF(skb);
+ st->p.header_length = (skb->h.th->doff << 2u) + SKB_TCP_OFF(skb);
st->p.full_packet_size = (st->p.header_length
+ skb_shinfo(skb)->gso_size);
st->p.gso_size = skb_shinfo(skb)->gso_size;
- st->p.ip_id = htons(ip_hdr(skb)->id);
- st->seqnum = ntohl(tcp_hdr(skb)->seq);
+ st->p.ip_id = htons(skb->nh.iph->id);
+ st->seqnum = ntohl(skb->h.th->seq);
- EPRINTK_ON(tcp_hdr(skb)->urg);
- EPRINTK_ON(tcp_hdr(skb)->syn);
- EPRINTK_ON(tcp_hdr(skb)->rst);
+ EPRINTK_ON(skb->h.th->urg);
+ EPRINTK_ON(skb->h.th->syn);
+ EPRINTK_ON(skb->h.th->rst);
st->remaining_len = skb->len - st->p.header_length;
/* This packet will be the last in the TSO burst. */
ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
+ st->remaining_len);
- tsoh_th->fin = tcp_hdr(skb)->fin;
- tsoh_th->psh = tcp_hdr(skb)->psh;
+ tsoh_th->fin = skb->h.th->fin;
+ tsoh_th->psh = skb->h.th->psh;
}
tsoh_iph->tot_len = htons(ip_length);
tso_check_safe(skb);
- if (skb->ip_summed != CHECKSUM_PARTIAL)
+ if (skb->ip_summed != CHECKSUM_HW)
EPRINTK("Trying to TSO send a packet without HW checksum\n");
tso_start(&state, skb);
frag_i = -1;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->ip_summed == CHECKSUM_HW) {
/* Set to zero to encourage falcon to work it out for us */
- *(u16*)(skb->head + skb->csum_start + skb->csum_offset) = 0;
+ *(u16*)(skb->h.raw + skb->csum) = 0;
}
if (multi_post_start_new_buffer(vnic, &state)) {
kva = buf->pkt_kva;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->ip_summed == CHECKSUM_HW) {
/* Set to zero to encourage falcon to work it out for us */
- *(u16*)(skb->head + skb->csum_start + skb->csum_offset) = 0;
+ *(u16*)(skb->h.raw + skb->csum) = 0;
}
NETFRONT_ACCEL_PKTBUFF_FOR_EACH_FRAGMENT
(skb, idx, frag_data, frag_len, {
(cuckoo_hash_key *)(&key), &value);
if (!try_fastpath) {
- DECLARE_MAC_BUF(buf);
-
- VPRINTK("try fast path false for mac: %s\n",
- print_mac(buf, skb->data));
+ VPRINTK("try fast path false for mac: " MAC_FMT "\n",
+ MAC_ARG(skb->data));
return NETFRONT_ACCEL_STATUS_CANT;
}
if (compare_ether_addr(skb->data, vnic->mac)) {
struct iphdr *ip = (struct iphdr *)(skb->data + ETH_HLEN);
u16 port;
- DECLARE_MAC_BUF(buf);
- DPRINTK("%s: saw wrong MAC address %s\n",
- __FUNCTION__, print_mac(buf, skb->data));
+ DPRINTK("%s: saw wrong MAC address " MAC_FMT "\n",
+ __FUNCTION__, MAC_ARG(skb->data));
if (ip->protocol == IPPROTO_TCP) {
struct tcphdr *tcp = (struct tcphdr *)
/* Create xenbus msg event channel */
err = bind_listening_port_to_irqhandler
(dev->otherend_id, netfront_accel_msg_channel_irq_from_bend,
- IRQF_SAMPLE_RANDOM, "vnicctrl", vnic);
+ SA_SAMPLE_RANDOM, "vnicctrl", vnic);
if (err < 0) {
EPRINTK("Couldn't bind msg event channel\n");
goto fail_msg_irq;
/* Create xenbus net event channel */
err = bind_listening_port_to_irqhandler
(dev->otherend_id, netfront_accel_net_channel_irq_from_bend,
- IRQF_SAMPLE_RANDOM, "vnicfront", vnic);
+ SA_SAMPLE_RANDOM, "vnicfront", vnic);
if (err < 0) {
EPRINTK("Couldn't bind net event channel\n");
goto fail_net_irq;
DPRINTK("%s at %s:%d\n", #exp, __FILE__, __LINE__); \
} while(0)
+#define MAC_FMT "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"
+#define MAC_ARG(_mac) (_mac)[0], (_mac)[1], (_mac)[2], (_mac)[3], (_mac)[4], (_mac)[5]
+
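Usage note (hedged) for the two helpers above:

/* MAC_ARG() expands to six byte-sized arguments matching MAC_FMT's six
 * "%.2x" conversions, e.g.:
 *	DPRINTK("mac " MAC_FMT "\n", MAC_ARG(skb->data));
 * adjacent string literals around MAC_FMT are concatenated by the
 * preprocessor. */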
#include <xen/xenbus.h>
/*! Map a set of pages from another domain
* frontend or the backend of that driver.
*
* Copyright (C) 2005 XenSource Ltd
- *
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* IN THE SOFTWARE.
*/
-#if defined(CONFIG_XEN) || defined(MODULE)
#include <linux/slab.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
-#include <xen/driver_util.h>
-#else
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <asm/xen/hypervisor.h>
-#include <xen/interface/xen.h>
-#include <xen/interface/event_channel.h>
-#include <xen/events.h>
-#include <xen/grant_table.h>
-#endif
#include <xen/xenbus.h>
+#include <xen/driver_util.h>
#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif
+#define DPRINTK(fmt, args...) \
+ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
+
const char *xenbus_strstate(enum xenbus_state state)
{
static const char *const name[] = {
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
-/**
- * xenbus_watch_path - register a watch
- * @dev: xenbus device
- * @path: path to watch
- * @watch: watch to register
- * @callback: callback to register
- *
- * Register a @watch on the given path, using the given xenbus_watch structure
- * for storage, and the given @callback function as the callback. Return 0 on
- * success, or -errno on error. On success, the given @path will be saved as
- * @watch->node, and remains the caller's to free. On error, @watch->node will
- * be NULL, the device will switch to %XenbusStateClosing, and the error will
- * be saved in the store.
- */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
struct xenbus_watch *watch,
void (*callback)(struct xenbus_watch *,
EXPORT_SYMBOL_GPL(xenbus_watch_path);
-#if defined(CONFIG_XEN) || defined(MODULE)
int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
const char *path2, struct xenbus_watch *watch,
void (*callback)(struct xenbus_watch *,
return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path2);
-#else
-/**
- * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
- * @dev: xenbus device
- * @watch: watch to register
- * @callback: callback to register
- * @pathfmt: format of path to watch
- *
- * Register a watch on the given @path, using the given xenbus_watch
- * structure for storage, and the given @callback function as the callback.
- * Return 0 on success, or -errno on error. On success, the watched path
- * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
- * kfree(). On error, watch->node will be NULL, so the caller has nothing to
- * free, the device will switch to %XenbusStateClosing, and the error will be
- * saved in the store.
- */
-int xenbus_watch_pathfmt(struct xenbus_device *dev,
- struct xenbus_watch *watch,
- void (*callback)(struct xenbus_watch *,
- const char **, unsigned int),
- const char *pathfmt, ...)
-{
- int err;
- va_list ap;
- char *path;
-
- va_start(ap, pathfmt);
- path = kvasprintf(GFP_KERNEL, pathfmt, ap);
- va_end(ap);
- if (!path) {
- xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
- return -ENOMEM;
- }
- err = xenbus_watch_path(dev, path, watch, callback);
- if (err)
- kfree(path);
- return err;
-}
-EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
-#endif
-
-
-/**
- * xenbus_switch_state
- * @dev: xenbus device
- * @xbt: transaction handle
- * @state: new state
- *
- * Advertise in the store a change of the given driver to the given new_state.
- * Return 0 on success, or -errno on error. On error, the device will switch
- * to XenbusStateClosing, and the error will be saved in the store.
- */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
/* We check whether the state is currently set to the given value, and
}
-static void _dev_error(struct xenbus_device *dev, int err,
- const char *fmt, va_list ap)
+void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
+ va_list ap)
{
int ret;
unsigned int len;
path_buffer = error_path(dev);
if (path_buffer == NULL) {
- dev_err(&dev->dev,
- "xenbus: failed to write error node for %s (%s)\n",
- dev->nodename, printf_buffer);
+ printk("xenbus: failed to write error node for %s (%s)\n",
+ dev->nodename, printf_buffer);
goto fail;
}
if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
- dev_err(&dev->dev,
- "xenbus: failed to write error node for %s (%s)\n",
- dev->nodename, printf_buffer);
+ printk("xenbus: failed to write error node for %s (%s)\n",
+ dev->nodename, printf_buffer);
goto fail;
}
}
-/**
- * xenbus_dev_error
- * @dev: xenbus device
- * @err: error to report
- * @fmt: error message format
- *
- * Report the given negative errno into the store, along with the given
- * formatted message.
- */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
...)
{
EXPORT_SYMBOL_GPL(xenbus_dev_error);
-/**
- * xenbus_dev_fatal
- * @dev: xenbus device
- * @err: error to report
- * @fmt: error message format
- *
- * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
- * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
- * closedown of this driver and its peer.
- */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
...)
{
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
-/**
- * xenbus_grant_ring
- * @dev: xenbus device
- * @ring_mfn: mfn of ring to grant
- *
- * Grant access to the given @ring_mfn to the peer of the given device. Return
- * 0 on success, or -errno on error. On error, the device will switch to
- * XenbusStateClosing, and the error will be saved in the store.
- */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
-/**
- * Allocate an event channel for the given xenbus_device, assigning the newly
- * created local port to *port. Return 0 on success, or -errno on error. On
- * error, the device will switch to XenbusStateClosing, and the error will be
- * saved in the store.
- */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
struct evtchn_alloc_unbound alloc_unbound;
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
-#if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */
-/**
- * Bind to an existing interdomain event channel in another domain. Returns 0
- * on success and stores the local port in *port. On error, returns -errno,
- * switches the device to XenbusStateClosing, and saves the error in XenStore.
- */
-int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
-{
- struct evtchn_bind_interdomain bind_interdomain;
- int err;
-
- bind_interdomain.remote_dom = dev->otherend_id;
- bind_interdomain.remote_port = remote_port;
-
- err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
- &bind_interdomain);
- if (err)
- xenbus_dev_fatal(dev, err,
- "binding to event channel %d from domain %d",
- remote_port, dev->otherend_id);
- else
- *port = bind_interdomain.local_port;
-
- return err;
-}
-EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
-#endif
-
-
-/**
- * Free an existing event channel. Returns 0 on success or -errno on error.
- */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
struct evtchn_close close;
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
-#if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */
-/**
- * xenbus_map_ring_valloc
- * @dev: xenbus device
- * @gnt_ref: grant reference
- * @vaddr: pointer to address to be filled out by mapping
- *
- * Based on Rusty Russell's skeleton driver's map_page.
- * Map a page of memory into this domain from another domain's grant table.
- * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
- * page to that address, and sets *vaddr to that address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
- * XenbusStateClosing and the error message will be saved in XenStore.
- */
-int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
-{
- struct gnttab_map_grant_ref op = {
- .flags = GNTMAP_host_map,
- .ref = gnt_ref,
- .dom = dev->otherend_id,
- };
- struct vm_struct *area;
-
- *vaddr = NULL;
-
- area = xen_alloc_vm_area(PAGE_SIZE);
- if (!area)
- return -ENOMEM;
-
- op.host_addr = (unsigned long)area->addr;
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
-
- if (op.status != GNTST_okay) {
- xen_free_vm_area(area);
- xenbus_dev_fatal(dev, op.status,
- "mapping in shared page %d from domain %d",
- gnt_ref, dev->otherend_id);
- return op.status;
- }
-
- /* Stuff the handle in an unused field */
- area->phys_addr = (unsigned long)op.handle;
-
- *vaddr = area->addr;
- return 0;
-}
-EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
-
-
-/**
- * xenbus_map_ring
- * @dev: xenbus device
- * @gnt_ref: grant reference
- * @handle: pointer to grant handle to be filled
- * @vaddr: address to be mapped to
- *
- * Map a page of memory into this domain from another domain's grant table.
- * xenbus_map_ring does not allocate the virtual address space (you must do
- * this yourself!). It only maps in the page to the specified address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
- * XenbusStateClosing and the error message will be saved in XenStore.
- */
-int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
- grant_handle_t *handle, void *vaddr)
-{
- struct gnttab_map_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- .flags = GNTMAP_host_map,
- .ref = gnt_ref,
- .dom = dev->otherend_id,
- };
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
-
- if (op.status != GNTST_okay) {
- xenbus_dev_fatal(dev, op.status,
- "mapping in shared page %d from domain %d",
- gnt_ref, dev->otherend_id);
- } else
- *handle = op.handle;
-
- return op.status;
-}
-EXPORT_SYMBOL_GPL(xenbus_map_ring);
-
-
-/**
- * xenbus_unmap_ring_vfree
- * @dev: xenbus device
- * @vaddr: addr to unmap
- *
- * Based on Rusty Russell's skeleton driver's unmap_page.
- * Unmap a page of memory in this domain that was imported from another domain.
- * Use xenbus_unmap_ring_vfree if you mapped in your memory with
- * xenbus_map_ring_valloc (it will free the virtual address space).
- * Returns 0 on success and returns GNTST_* on error
- * (see xen/include/interface/grant_table.h).
- */
-int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
-{
- struct vm_struct *area;
- struct gnttab_unmap_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- };
-
- /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
- * method so that we don't have to muck with vmalloc internals here.
- * We could force the user to hang on to their struct vm_struct from
- * xenbus_map_ring_valloc, but these 6 lines considerably simplify
- * this API.
- */
- read_lock(&vmlist_lock);
- for (area = vmlist; area != NULL; area = area->next) {
- if (area->addr == vaddr)
- break;
- }
- read_unlock(&vmlist_lock);
-
- if (!area) {
- xenbus_dev_error(dev, -ENOENT,
- "can't find mapped virtual address %p", vaddr);
- return GNTST_bad_virt_addr;
- }
-
- op.handle = (grant_handle_t)area->phys_addr;
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
- BUG();
-
- if (op.status == GNTST_okay)
- xen_free_vm_area(area);
- else
- xenbus_dev_error(dev, op.status,
- "unmapping page at handle %d error %d",
- (int16_t)area->phys_addr, op.status);
-
- return op.status;
-}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
-
-
-/**
- * xenbus_unmap_ring
- * @dev: xenbus device
- * @handle: grant handle
- * @vaddr: addr to unmap
- *
- * Unmap a page of memory in this domain that was imported from another domain.
- * Returns 0 on success and returns GNTST_* on error
- * (see xen/include/interface/grant_table.h).
- */
-int xenbus_unmap_ring(struct xenbus_device *dev,
- grant_handle_t handle, void *vaddr)
-{
- struct gnttab_unmap_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- .handle = handle,
- };
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
- BUG();
-
- if (op.status != GNTST_okay)
- xenbus_dev_error(dev, op.status,
- "unmapping page at handle %d error %d",
- handle, op.status);
-
- return op.status;
-}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
-#endif
-
-
-/**
- * xenbus_read_driver_state
- * @path: path for driver
- *
- * Return the state of the driver rooted at the given store path, or
- * XenbusStateUnknown if no state can be read.
- */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
enum xenbus_state result;
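
For readers following the grant-mapping API retired by the #if 0 block above: its kernel-doc describes a strict map/unmap pairing. Below is a minimal sketch of a caller, assuming only the signatures documented in that block; the helper name and error handling are illustrative, not part of this patch.

/* Illustrative only -- not from this patch. */
static int example_map_otherend_ring(struct xenbus_device *dev,
				     int gnt_ref, void **ring)
{
	int err;

	/* Allocates PAGE_SIZE of vmalloc space and maps gnt_ref into it;
	 * on failure the device has already been switched to
	 * XenbusStateClosing by the helper. */
	err = xenbus_map_ring_valloc(dev, gnt_ref, ring);
	if (err)
		return err;

	/* ... use the shared page at *ring ... */

	/* Counterpart that also frees the virtual address space. */
	return xenbus_unmap_ring_vfree(dev, *ring);
}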
* Low level code to talk to Xen Store: ringbuffer and event channel.
*
* Copyright (C) 2005 Rusty Russell, IBM Corporation
- *
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/err.h>
-#include <xen/xenbus.h>
-#if defined(CONFIG_XEN) || defined(MODULE)
+#include <linux/ptrace.h>
+#include <linux/workqueue.h>
#include <xen/evtchn.h>
+#include <xen/xenbus.h>
+
#include <asm/hypervisor.h>
-#else
-#include <asm/xen/hypervisor.h>
-#include <xen/events.h>
-#include <xen/page.h>
-#endif
#include "xenbus_comms.h"
static int xenbus_irq;
-extern void xenbus_probe(struct work_struct *);
+extern void xenbus_probe(void *);
extern int xenstored_ready;
-static DECLARE_WORK(probe_work, xenbus_probe);
+static DECLARE_WORK(probe_work, xenbus_probe, NULL);
static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
-static irqreturn_t wake_waiting(int irq, void *unused)
+static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
{
if (unlikely(xenstored_ready == 0)) {
xenstored_ready = 1;
return buf + MASK_XENSTORE_IDX(cons);
}
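
The hunks above revert xenbus_comms.c to the pre-2.6.20 deferred-work and interrupt conventions: DECLARE_WORK() takes an explicit data pointer, the work function receives it as void *, and interrupt handlers take a struct pt_regs * third argument. A compile-level sketch of those signatures (all names here are illustrative):

#include <linux/workqueue.h>
#include <linux/interrupt.h>

static void example_work_fn(void *data)
{
	/* data is the third argument given to DECLARE_WORK() below. */
}
static DECLARE_WORK(example_work, example_work_fn, NULL);

static irqreturn_t example_handler(int irq, void *dev_id,
				   struct pt_regs *regs)
{
	schedule_work(&example_work);	/* defer real work to process context */
	return IRQ_HANDLED;
}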
-/**
- * xb_write - low level write
- * @data: buffer to send
- * @len: length of buffer
- *
- * Returns 0 on success, error otherwise.
- */
int xb_write(const void *data, unsigned len)
{
struct xenstore_domain_interface *intf = xen_store_interface;
return 0;
}
-/**
- * xb_init_comms - Set up interrupt handler off store event channel.
- */
+/* Set up interrupt handler off store event channel. */
int xb_init_comms(void)
{
struct xenstore_domain_interface *intf = xen_store_interface;
intf->rsp_cons = intf->rsp_prod;
}
-#if defined(CONFIG_XEN) || defined(MODULE)
if (xenbus_irq)
unbind_from_irqhandler(xenbus_irq, &xb_waitq);
}
xenbus_irq = err;
-#else
- if (xenbus_irq) {
- /* Already have an irq; assume we're resuming */
- rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
- } else {
- err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
- 0, "xenbus", &xb_waitq);
- if (err <= 0) {
- printk(KERN_ERR "XENBUS request irq failed %i\n", err);
- return err;
- }
- xenbus_irq = err;
- }
-#endif
return 0;
}
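
The deleted #else branch used the pvops helpers (rebind_evtchn_irq and friends from xen/events.h); the surviving path binds through the classic evtchn interface and simply tears down any previous binding on resume. A sketch of that retained pattern, reconstructed from the calls visible in this hunk (the wrapper function is hypothetical):

/* Sketch of the retained rebind-on-resume path -- illustrative only. */
static int example_rebind_xenstore_irq(void)
{
	int err;

	if (xenbus_irq)
		unbind_from_irqhandler(xenbus_irq, &xb_waitq);

	err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
					0, "xenbus", &xb_waitq);
	if (err <= 0) {
		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
		return err;
	}
	xenbus_irq = err;
	return 0;
}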
/*
* Private include for xenbus communications.
- *
+ *
* Copyright (C) 2005 Rusty Russell, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
struct xsd_sockmsg hdr;
const char *path, *token;
int path_len, tok_len, body_len, data_len = 0;
-
+
path = vec[XS_WATCH_PATH];
token = adap->token;
* Copyright (C) 2005 Mike Wray, Hewlett-Packard
* Copyright (C) 2005, 2006 XenSource Ltd
* Copyright (C) 2007 Solarflare Communications, Inc.
- *
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
__FUNCTION__, __LINE__, ##args)
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
-#include <linux/io.h>
+#include <linux/module.h>
+#include <asm/io.h>
#include <asm/page.h>
+#include <asm/maddr.h>
#include <asm/pgtable.h>
-#if defined(CONFIG_XEN) || defined(MODULE)
#include <asm/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/xen_proc.h>
#ifdef MODULE
#include <xen/hvm.h>
#endif
-#else
-#include <asm/xen/hypervisor.h>
-#include <xen/xenbus.h>
-#include <xen/events.h>
-#include <xen/page.h>
-#endif
#include "xenbus_comms.h"
#include "xenbus_probe.h"
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env)
+static int xenbus_uevent_frontend(struct device *dev, char **envp,
+ int num_envp, char *buffer, int buffer_size)
{
struct xenbus_device *xdev;
+ int length = 0, i = 0;
if (dev == NULL)
return -ENODEV;
return -ENODEV;
/* stuff we want to pass to /sbin/hotplug */
-#if defined(CONFIG_XEN) || defined(MODULE)
- add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype);
- add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename);
-#endif
- add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "XENBUS_TYPE=%s", xdev->devicetype);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "XENBUS_PATH=%s", xdev->nodename);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "MODALIAS=xen:%s", xdev->devicetype);
return 0;
}
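
The frontend uevent hook above is converted to the pre-2.6.23 add_uevent_var() calling convention, in which the caller threads an element index and a byte count through every call and is responsible for terminating the array. A self-contained sketch of that contract (the hook name and variable are hypothetical):

static int example_uevent(struct device *dev, char **envp, int num_envp,
			  char *buffer, int buffer_size)
{
	int i = 0, length = 0;

	/* Returns -ENOMEM once envp[] or buffer[] is exhausted. */
	if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
			   &length, "EXAMPLE_KEY=%s", "value"))
		return -ENOMEM;

	envp[i] = NULL;	/* callers expect a NULL-terminated array */
	return 0;
}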
.uevent = xenbus_uevent_frontend,
#endif
},
-#if defined(CONFIG_XEN) || defined(MODULE)
.dev = {
.bus_id = "xen",
},
-#endif
};
static void otherend_changed(struct xenbus_watch *watch,
if (!dev->otherend ||
strncmp(dev->otherend, vec[XS_WATCH_PATH],
strlen(dev->otherend))) {
- dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]);
+ DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
return;
}
state = xenbus_read_driver_state(dev->otherend);
- dev_dbg(&dev->dev, "state is %d (%s), %s, %s",
- state, xenbus_strstate(state), dev->otherend_watch.node,
- vec[XS_WATCH_PATH]);
+ DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state),
+ dev->otherend_watch.node, vec[XS_WATCH_PATH]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
/*
static int watch_otherend(struct xenbus_device *dev)
{
-#if defined(CONFIG_XEN) || defined(MODULE)
return xenbus_watch_path2(dev, dev->otherend, "state",
&dev->otherend_watch, otherend_changed);
-#else
- return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
- "%s/%s", dev->otherend, "state");
-#endif
}
err = talk_to_otherend(dev);
if (err) {
- dev_warn(&dev->dev,
- "xenbus_probe: talk_to_otherend on %s failed.\n",
- dev->nodename);
+ printk(KERN_WARNING
+ "xenbus_probe: talk_to_otherend on %s failed.\n",
+ dev->nodename);
return err;
}
err = watch_otherend(dev);
if (err) {
- dev_warn(&dev->dev,
- "xenbus_probe: watch_otherend on %s failed.\n",
- dev->nodename);
+ printk(KERN_WARNING
+ "xenbus_probe: watch_otherend on %s failed.\n",
+ dev->nodename);
return err;
}
get_device(&dev->dev);
if (dev->state != XenbusStateConnected) {
- dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n", __FUNCTION__,
- dev->nodename, xenbus_strstate(dev->state));
+ printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__,
+ dev->nodename, xenbus_strstate(dev->state));
goto out;
}
xenbus_switch_state(dev, XenbusStateClosing);
timeout = wait_for_completion_timeout(&dev->down, timeout);
if (!timeout)
- dev_info(&dev->dev, "%s: %s timeout closing device\n",
- __FUNCTION__, dev->nodename);
+ printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename);
out:
put_device(&dev->dev);
}
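
The shutdown path above blocks on dev->down (initialised by the init_completion() call later in this patch); its counterpart, a complete() issued once the device has finished closing, is not visible in these hunks, so the placement below is an assumption:

/* Assumed counterpart (not shown in this patch): wake the shutdown
 * path once the otherend reports Closed. */
static void example_signal_closed(struct xenbus_device *dev)
{
	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
		complete(&dev->down);
}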
int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name)
+ struct xen_bus_type *bus)
{
int ret;
drv->driver.name = drv->name;
drv->driver.bus = &bus->bus;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
- drv->driver.owner = owner;
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
- drv->driver.mod_name = mod_name;
+ drv->driver.owner = drv->owner;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
drv->driver.probe = xenbus_dev_probe;
return ret;
}
-int __xenbus_register_frontend(struct xenbus_driver *drv,
- struct module *owner, const char *mod_name)
+int xenbus_register_frontend(struct xenbus_driver *drv)
{
int ret;
drv->read_otherend_details = read_backend_details;
- ret = xenbus_register_driver_common(drv, &xenbus_frontend,
- owner, mod_name);
+ ret = xenbus_register_driver_common(drv, &xenbus_frontend);
if (ret)
return ret;
return 0;
}
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(xenbus_register_frontend);
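
With the owner/mod_name parameters dropped, module ownership now comes from the .owner field of the xenbus_driver itself (see the drv->owner assignment above). A hypothetical frontend driver registering through the restored entry point might look like this; every name below is illustrative:

/* Hypothetical driver -- illustrative only. */
static int example_probe(struct xenbus_device *dev,
			 const struct xenbus_device_id *id)
{
	return 0;
}

static const struct xenbus_device_id example_ids[] = {
	{ "vexample" },
	{ "" }
};

static struct xenbus_driver example_driver = {
	.name  = "vexample",
	.owner = THIS_MODULE,	/* copied to driver.owner at registration */
	.ids   = example_ids,
	.probe = example_probe,
};

static int __init example_init(void)
{
	return xenbus_register_frontend(&example_driver);
}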
void xenbus_unregister_driver(struct xenbus_driver *drv)
{
}
DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
-static ssize_t xendev_show_modalias(struct device *dev,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
- struct device_attribute *attr,
-#endif
- char *buf)
-{
- return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
-}
-DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
xendev->devicetype = tmpstring;
init_completion(&xendev->down);
-#if defined(CONFIG_XEN) || defined(MODULE)
xendev->dev.parent = &bus->dev;
-#endif
xendev->dev.bus = &bus->bus;
xendev->dev.release = xenbus_dev_release;
err = device_create_file(&xendev->dev, &dev_attr_nodename);
if (err)
- goto fail_unregister;
-
+ goto unregister;
err = device_create_file(&xendev->dev, &dev_attr_devtype);
if (err)
- goto fail_remove_nodename;
-
- err = device_create_file(&xendev->dev, &dev_attr_modalias);
- if (err)
- goto fail_remove_devtype;
+ goto unregister;
return 0;
-fail_remove_devtype:
- device_remove_file(&xendev->dev, &dev_attr_devtype);
-fail_remove_nodename:
+unregister:
device_remove_file(&xendev->dev, &dev_attr_nodename);
-fail_unregister:
+ device_remove_file(&xendev->dev, &dev_attr_devtype);
device_unregister(&xendev->dev);
fail:
kfree(xendev);
char *nodename;
int err;
- nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
- xenbus_frontend.root, type, name);
+ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name);
if (!nodename)
return -ENOMEM;
return (len == 0) ? i : -ERANGE;
}
-void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
+void dev_changed(const char *node, struct xen_bus_type *bus)
{
int exists, rootlen;
struct xenbus_device *dev;
const char *p, *root;
if (bus->error || char_count(node, '/') < 2)
- return;
+ return;
exists = xenbus_exists(XBT_NIL, node, "");
if (!exists) {
{
DPRINTK("");
- xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
+ dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
}
/* We watch for devices appearing and vanishing. */
if (drv->resume) {
err = drv->resume(xdev);
- if (err) {
+ if (err) {
printk(KERN_WARNING
- "xenbus: resume %s failed: %i\n",
+ "xenbus: resume %s failed: %i\n",
dev->bus_id, err);
return err;
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
-void xenbus_probe(struct work_struct *unused)
+void xenbus_probe(void *unused)
{
BUG_ON((xenstored_ready <= 0));
}
#endif
-#ifndef MODULE
-static int __init xenbus_probe_init(void)
-#else
-static int __devinit xenbus_probe_init(void)
-#endif
+static int xenbus_probe_init(void)
{
int err = 0;
-#if defined(CONFIG_XEN) || defined(MODULE)
unsigned long page = 0;
-#endif
DPRINTK("");
* Domain0 doesn't have a store_evtchn or store_mfn yet.
*/
if (is_initial_xendomain()) {
-#if defined(CONFIG_XEN) || defined(MODULE)
struct evtchn_alloc_unbound alloc_unbound;
/* Allocate page. */
xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
if (xsd_port_intf)
xsd_port_intf->read_proc = xsd_port_read;
-#endif
-#else
- /* dom0 not yet supported */
#endif
xen_store_interface = mfn_to_virt(xen_store_mfn);
} else {
xenstored_ready = 1;
-#ifndef MODULE
+#ifdef CONFIG_XEN
xen_store_evtchn = xen_start_info->store_evtchn;
xen_store_mfn = xen_start_info->store_mfn;
xen_store_interface = mfn_to_virt(xen_store_mfn);
}
-#if defined(CONFIG_XEN) || defined(MODULE)
xenbus_dev_init();
-#endif
/* Initialize the interface to xenstore. */
err = xs_init();
goto err;
}
-#if defined(CONFIG_XEN) || defined(MODULE)
/* Register ourselves with the kernel device subsystem */
if (!xenbus_frontend.error) {
xenbus_frontend.error = device_register(&xenbus_frontend.dev);
xenbus_frontend.error);
}
}
-#endif
xenbus_backend_device_register();
if (!is_initial_xendomain())
return 0;
err:
-#if defined(CONFIG_XEN) || defined(MODULE)
if (page)
free_page(page);
-#endif
/*
* Do not unregister the xenbus front/backend buses here. The buses
return err;
}
-#ifndef MODULE
-postcore_initcall(xenbus_probe_init);
#ifdef CONFIG_XEN
+postcore_initcall(xenbus_probe_init);
MODULE_LICENSE("Dual BSD/GPL");
#else
-MODULE_LICENSE("GPL");
-#endif
-#else
-int __devinit xenbus_init(void)
+int xenbus_init(void)
{
return xenbus_probe_init();
}
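
Collapsing the #ifdef pair leaves one initialisation story: built-in CONFIG_XEN kernels run xenbus_probe_init() as a postcore initcall, while module builds reach it through xenbus_init(). A sketch of a module-side caller, assuming it is linked into the same module as xenbus_init() (the function name is hypothetical):

/* Hypothetical module init path calling the entry point above. */
static int __init example_platform_init(void)
{
	return xenbus_init();
}
module_init(example_platform_init);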
if (seconds_waited == 300)
break;
}
-
+
schedule_timeout_interruptible(HZ/10);
}
*
* Copyright (C) 2005 Rusty Russell, IBM Corporation
* Copyright (C) 2005 XenSource Ltd.
- *
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
int (*probe)(const char *type, const char *dir);
struct bus_type bus;
-#if defined(CONFIG_XEN) || defined(MODULE)
struct device dev;
-#endif
};
extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
extern int xenbus_dev_remove(struct device *_dev);
extern int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name);
+ struct xen_bus_type *bus);
extern int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
extern int xenbus_probe_devices(struct xen_bus_type *bus);
-extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
+extern void dev_changed(const char *node, struct xen_bus_type *bus);
#endif
#include <xen/platform-compat.h>
#endif
-static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env);
+static int xenbus_uevent_backend(struct device *dev, char **envp,
+ int num_envp, char *buffer, int buffer_size);
static int xenbus_probe_backend(const char *type, const char *domid);
extern int read_otherend_details(struct xenbus_device *xendev,
},
};
-static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env)
+static int xenbus_uevent_backend(struct device *dev, char **envp,
+ int num_envp, char *buffer, int buffer_size)
{
struct xenbus_device *xdev;
struct xenbus_driver *drv;
+ int i = 0;
+ int length = 0;
DPRINTK("");
return -ENODEV;
/* stuff we want to pass to /sbin/hotplug */
- add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "XENBUS_TYPE=%s", xdev->devicetype);
- add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "XENBUS_PATH=%s", xdev->nodename);
- add_uevent_var(env, "XENBUS_BASE_PATH=%s", xenbus_backend.root);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "XENBUS_BASE_PATH=%s", xenbus_backend.root);
+
+ /* terminate, set to next free slot, shrink available space */
+ envp[i] = NULL;
+ envp = &envp[i];
+ num_envp -= i;
+ buffer = &buffer[length];
+ buffer_size -= length;
if (dev->driver) {
drv = to_xenbus_driver(dev->driver);
if (drv && drv->uevent)
- return drv->uevent(xdev, env);
+ return drv->uevent(xdev, envp, num_envp, buffer,
+ buffer_size);
}
return 0;
}
-int __xenbus_register_backend(struct xenbus_driver *drv,
- struct module *owner, const char *mod_name)
+int xenbus_register_backend(struct xenbus_driver *drv)
{
drv->read_otherend_details = read_frontend_details;
- return xenbus_register_driver_common(drv, &xenbus_backend,
- owner, mod_name);
+ return xenbus_register_driver_common(drv, &xenbus_backend);
}
-EXPORT_SYMBOL_GPL(__xenbus_register_backend);
+EXPORT_SYMBOL_GPL(xenbus_register_backend);
/* backend/<typename>/<frontend-uuid>/<name> */
static int xenbus_probe_backend_unit(const char *dir,
{
DPRINTK("");
- xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
+ dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
}
static struct xenbus_watch be_watch = {
* and we use xenbus_comms for communication.
*
* Copyright (C) 2005 Rusty Russell, IBM Corporation
- *
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
}
for (i = 0; i < num_vecs; i++) {
- err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
+ err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
if (err) {
mutex_unlock(&xs_state.request_mutex);
return ERR_PTR(err);
char token[sizeof(watch) * 2 + 1];
int err;
-#if defined(CONFIG_XEN) || defined(MODULE)
BUG_ON(watch->flags & XBWF_new_thread);
-#endif
sprintf(token, "%lX", (long)watch);
up_read(&xs_state.watch_mutex);
- /* Make sure there are no callbacks running currently (unless
- its us) */
- if (current->pid != xenwatch_pid)
- mutex_lock(&xenwatch_mutex);
-
/* Cancel pending watch events. */
spin_lock(&watch_events_lock);
list_for_each_entry_safe(msg, tmp, &watch_events, list) {
}
spin_unlock(&watch_events_lock);
- if (current->pid != xenwatch_pid)
+ /* Flush any currently-executing callback, unless we are it. :-) */
+ if (current->pid != xenwatch_pid) {
+ mutex_lock(&xenwatch_mutex);
mutex_unlock(&xenwatch_mutex);
+ }
}
EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
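
The rewritten tail of unregister_xenbus_watch() replaces the lock-early approach with a lock/unlock pair taken after the event queue has been purged: because xenwatch_thread() holds xenwatch_mutex around every callback, briefly acquiring it guarantees that any in-flight callback for this watch has returned before unregistration completes. A minimal sketch of the idiom, with illustrative names:

static DEFINE_MUTEX(example_cb_mutex);	/* held around each callback */

static void example_flush_running_callback(void)
{
	/* Blocks until whichever callback currently holds the mutex
	 * drops it; nothing new can be queued for us at this point. */
	mutex_lock(&example_cb_mutex);
	mutex_unlock(&example_cb_mutex);
}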
up_write(&xs_state.transaction_mutex);
}
-#if defined(CONFIG_XEN) || defined(MODULE)
static int xenwatch_handle_callback(void *data)
{
struct xs_stored_msg *msg = data;
return 0;
}
-#endif
static int xenwatch_thread(void *unused)
{
msg = list_entry(ent, struct xs_stored_msg, list);
-#if defined(CONFIG_XEN) || defined(MODULE)
/*
* Unlock the mutex before running an XBWF_new_thread
* handler. kthread_run can block which can deadlock
xenwatch_handle_callback(msg);
mutex_unlock(&xenwatch_mutex);
}
-#else
- msg->u.watch.handle->callback(
- msg->u.watch.handle,
- (const char **)msg->u.watch.vec,
- msg->u.watch.vec_size);
- mutex_unlock(&xenwatch_mutex);
- kfree(msg->u.watch.vec);
- kfree(msg);
-#endif
}
return 0;
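
For context, the XBWF_new_thread comment above refers to a dispatch that runs flagged handlers in their own kthread, so a blocking handler cannot deadlock the shared xenwatch thread. A sketch of that branch, reconstructed from the fragments above (exact placement in the loop is assumed, not verbatim from this patch):

/* Reconstructed sketch of the dispatch described above. */
if (msg->u.watch.handle->flags & XBWF_new_thread) {
	mutex_unlock(&xenwatch_mutex);	/* kthread_run() may block */
	kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb");
} else {
	xenwatch_handle_callback(msg);
	mutex_unlock(&xenwatch_mutex);
}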