xenbits.xensource.com Git - libvirt.git/commitdiff
storage backend: Add RBD (RADOS Block Device) support
authorWido den Hollander <wido@widodh.nl>
Mon, 14 May 2012 09:06:42 +0000 (11:06 +0200)
committerEric Blake <eblake@redhat.com>
Mon, 21 May 2012 18:37:38 +0000 (12:37 -0600)
This patch adds support for a new storage backend with RBD support.

RBD is the RADOS Block Device and is part of the Ceph distributed storage
system.

It comes in two flavours: Qemu-RBD and Kernel RBD; this storage backend only
supports Qemu-RBD, thus limiting the use of this storage driver to Qemu only.

To function this backend relies on librbd and librados being present on the
local system.

The backend also supports Cephx authentication for safe authentication with
the Ceph cluster.

For storing credentials it uses the built-in secret mechanism of libvirt.

Signed-off-by: Wido den Hollander <wido@widodh.nl>
16 files changed:
configure.ac
docs/drivers.html.in
docs/schemas/storagepool.rng
docs/storage.html.in
include/libvirt/libvirt.h.in
libvirt.spec.in
po/POTFILES.in
src/Makefile.am
src/conf/storage_conf.c
src/conf/storage_conf.h
src/storage/storage_backend.c
src/storage/storage_backend_rbd.c [new file with mode: 0644]
src/storage/storage_backend_rbd.h [new file with mode: 0644]
tests/storagepoolxml2xmlin/pool-rbd.xml [new file with mode: 0644]
tests/storagepoolxml2xmlout/pool-rbd.xml [new file with mode: 0644]
tools/virsh.c

index d666736fbc889a0b8e64b51304e6af4951f21615..06c6a4b92306714ec09651111cbc20bedab2b5ba 100644 (file)
@@ -1811,6 +1811,8 @@ AC_ARG_WITH([storage-mpath],
   AC_HELP_STRING([--with-storage-mpath], [with mpath backend for the storage driver @<:@default=check@:>@]),[],[with_storage_mpath=check])
 AC_ARG_WITH([storage-disk],
   AC_HELP_STRING([--with-storage-disk], [with GPartd Disk backend for the storage driver @<:@default=check@:>@]),[],[with_storage_disk=check])
+AC_ARG_WITH([storage-rbd],
+  AC_HELP_STRING([--with-storage-rbd], [with RADOS Block Device backend for the storage driver @<:@default=check@:>@]),[],[with_storage_rbd=check])
 
 if test "$with_libvirtd" = "no"; then
   with_storage_dir=no
@@ -1820,6 +1822,7 @@ if test "$with_libvirtd" = "no"; then
   with_storage_scsi=no
   with_storage_mpath=no
   with_storage_disk=no
+  with_storage_rbd=no
 fi
 if test "$with_storage_dir" = "yes" ; then
   AC_DEFINE_UNQUOTED([WITH_STORAGE_DIR], 1, [whether directory backend for storage driver is enabled])
@@ -1978,6 +1981,22 @@ if test "$with_storage_mpath" = "check"; then
 fi
 AM_CONDITIONAL([WITH_STORAGE_MPATH], [test "$with_storage_mpath" = "yes"])
 
+LIBRBD_LIBS=
+if test "$with_storage_rbd" = "yes" || test "$with_storage_rbd" = "check"; then
+    AC_CHECK_HEADER([rbd/librbd.h], [LIBRBD_FOUND=yes; break;])
+
+    if test "$LIBRBD_FOUND" = "yes"; then
+        with_storage_rbd=yes
+        LIBRBD_LIBS="-lrbd -lrados -lcrypto"
+        AC_DEFINE_UNQUOTED([WITH_STORAGE_RBD], [1],
+         [whether RBD backend for storage driver is enabled])
+    else
+        with_storage_rbd=no
+    fi
+fi
+AM_CONDITIONAL([WITH_STORAGE_RBD], [test "$with_storage_rbd" = "yes"])
+AC_SUBST([LIBRBD_LIBS])
+
 LIBPARTED_CFLAGS=
 LIBPARTED_LIBS=
 if test "$with_storage_disk" = "yes" ||
@@ -2754,6 +2773,7 @@ AC_MSG_NOTICE([   iSCSI: $with_storage_iscsi])
 AC_MSG_NOTICE([    SCSI: $with_storage_scsi])
 AC_MSG_NOTICE([   mpath: $with_storage_mpath])
 AC_MSG_NOTICE([    Disk: $with_storage_disk])
+AC_MSG_NOTICE([     RBD: $with_storage_rbd])
 AC_MSG_NOTICE([])
 AC_MSG_NOTICE([Security Drivers])
 AC_MSG_NOTICE([])
index 75038fc17d92d3241669ec50731242415557ab78..8ad2c33218db51e3d8fc5d365d975e2d9e04c38d 100644 (file)
@@ -42,6 +42,7 @@
       <li><strong><a href="storage.html#StorageBackendISCSI">iSCSI backend</a></strong></li>
       <li><strong><a href="storage.html#StorageBackendSCSI">SCSI backend</a></strong></li>
       <li><strong><a href="storage.html#StorageBackendMultipath">Multipath backend</a></strong></li>
+      <li><strong><a href="storage.html#StorageBackendRBD">RBD (RADOS Block Device) backend</a></strong></li>
     </ul>
   </body>
 </html>
index d4c80da8a7c650fb3d4df7c99b9602c3d6f4f565..75b6b514b2b03bbc4d42921f9ce8ae2bb2fabe42 100644 (file)
@@ -19,6 +19,7 @@
         <ref name='pooliscsi'/>
         <ref name='poolscsi'/>
         <ref name='poolmpath'/>
+        <ref name='poolrbd'/>
       </choice>
     </element>
   </define>
     <ref name='target'/>
   </define>
 
+  <define name='poolrbd'>
+    <attribute name='type'>
+      <value>rbd</value>
+    </attribute>
+    <ref name='commonmetadata'/>
+    <ref name='sizing'/>
+    <ref name='sourcerbd'/>
+  </define>
+
   <define name='sourceinfovendor'>
     <optional>
       <element name='vendor'>
   </define>
 
   <define name='sourceinfohost'>
-    <element name='host'>
-      <attribute name='name'>
-        <text/>
-      </attribute>
-      <optional>
-        <attribute name='port'>
-          <ref name="PortNumber"/>
+    <oneOrMore>
+      <element name='host'>
+        <attribute name='name'>
+          <text/>
         </attribute>
-      </optional>
-      <empty/>
-    </element>
+        <optional>
+          <attribute name='port'>
+            <ref name="PortNumber"/>
+          </attribute>
+        </optional>
+        <empty/>
+      </element>
+    </oneOrMore>
   </define>
 
   <define name='sourceinfodev'>
       <attribute name='type'>
         <choice>
           <value>chap</value>
+          <value>ceph</value>
         </choice>
       </attribute>
-      <attribute name='login'>
-        <text/>
-      </attribute>
-      <attribute name='passwd'>
-        <text/>
-      </attribute>
+      <choice>
+        <attribute name='login'>
+          <text/>
+        </attribute>
+        <attribute name='username'>
+          <text/>
+        </attribute>
+      </choice>
+      <optional>
+        <attribute name='passwd'>
+          <text/>
+        </attribute>
+      </optional>
+      <optional>
+        <ref name='sourceinfoauthsecret'/>
+      </optional>
+    </element>
+  </define>
+
+  <define name='sourceinfoauthsecret'>
+    <element name='secret'>
+      <choice>
+        <attribute name='uuid'>
+          <text/>
+        </attribute>
+        <attribute name='usage'>
+          <text/>
+        </attribute>
+      </choice>
     </element>
   </define>
 
     </element>
   </define>
 
+  <define name='sourcerbd'>
+    <element name='source'>
+      <ref name='sourceinfoname'/>
+      <ref name='sourceinfohost'/>
+      <optional>
+        <ref name='sourceinfoauth'/>
+      </optional>
+    </element>
+  </define>
+
   <define name='name'>
     <data type='string'>
       <param name="pattern">[a-zA-Z0-9_\+\-]+</param>
index 0e3e2891a4d3a0d4b09b96ec3e0c768419b06f37..b3484e811bc9269a8b533990927dcc58d3abf794 100644 (file)
       <li>
         <a href="#StorageBackendMultipath">Multipath backend</a>
       </li>
+      <li>
+        <a href="#StorageBackendRBD">RBD (RADOS Block Device) backend</a>
+      </li>
     </ul>
 
     <h2><a name="StorageBackendDir">Directory pool</a></h2>
       The Multipath volume pool does not use the volume format type element.
     </p>
 
+    <h2><a name="StorageBackendRBD">RBD pools</a></h2>
+    <p>
+      This storage driver provides a pool which contains all RBD
+      images in a RADOS pool.  RBD (RADOS Block Device) is part
+      of the Ceph distributed storage project.<br/>
+      This backend <i>only</i> supports Qemu with RBD support. Kernel RBD
+      which exposes RBD devices as block devices in /dev is <i>not</i>
+      supported. RBD images created with this storage backend
+      can be accessed through kernel RBD if configured manually, but
+      this backend does not provide mapping for these images.<br/>
+      Images created with this backend can be attached to Qemu guests
+      when Qemu is built with RBD support (since Qemu 0.14.0). The
+      backend supports cephx authentication for communication with the
+      Ceph cluster. Storing the cephx authentication key is done with
+      the libvirt secret mechanism. The UUID in the example pool input
+      refers to the UUID of the stored secret.
+      <span class="since">Since 0.9.13</span>
+    </p>
+
+    <h3>Example pool input</h3>
+    <pre>
+      &lt;pool type="rbd"&gt;
+        &lt;name&gt;myrbdpool&lt;/name&gt;
+        &lt;source&gt;
+          &lt;name&gt;rbdpool&lt;/name&gt;
+            &lt;host name='1.2.3.4' port='6789'/&gt;
+            &lt;host name='my.ceph.monitor' port='6789'/&gt;
+            &lt;host name='third.ceph.monitor' port='6789'/&gt;
+            &lt;auth username='admin' type='ceph'&gt;
+              &lt;secret uuid='2ec115d7-3a88-3ceb-bc12-0ac909a6fd87'/&gt;
+            &lt;/auth&gt;
+        &lt;/source&gt;
+      &lt;/pool&gt;</pre>
+
+    <h3>Example volume output</h3>
+    <pre>
+       &lt;volume&gt;
+         &lt;name&gt;myvol&lt;/name&gt;
+         &lt;key&gt;rbd/myvol&lt;/key&gt;
+         &lt;source&gt;
+         &lt;/source&gt;
+         &lt;capacity unit='bytes'&gt;53687091200&lt;/capacity&gt;
+         &lt;allocation unit='bytes'&gt;53687091200&lt;/allocation&gt;
+         &lt;target&gt;
+           &lt;path&gt;rbd:rbd/myvol&lt;/path&gt;
+           &lt;format type='unknown'/&gt;
+           &lt;permissions&gt;
+             &lt;mode&gt;00&lt;/mode&gt;
+             &lt;owner&gt;0&lt;/owner&gt;
+             &lt;group&gt;0&lt;/group&gt;
+           &lt;/permissions&gt;
+         &lt;/target&gt;
+       &lt;/volume&gt;</pre>
+
+    <h3>Example disk attachment</h3>
+    <p>RBD images can be attached to Qemu guests when Qemu is built
+    with RBD support. Information about attaching an RBD image to a
+    guest can be found
+    at <a href="formatdomain.html#elementsDisks">format domain</a>
+    page.</p>
+
+    <h3>Valid pool format types</h3>
+    <p>
+      The RBD pool does not use the pool format type element.
+    </p>
+
+    <h3>Valid volume format types</h3>
+    <p>
+      The RBD pool does not use the volume format type element.
+    </p>
 
   </body>
 </html>
index a817db80b02722aed40223655d824bbe3a1499b3..da3ce29fd6fea55cb33356761fa3705f3ea4509e 100644 (file)
@@ -2361,6 +2361,7 @@ typedef enum {
   VIR_STORAGE_VOL_FILE = 0,     /* Regular file based volumes */
   VIR_STORAGE_VOL_BLOCK = 1,    /* Block based volumes */
   VIR_STORAGE_VOL_DIR = 2,      /* Directory-passthrough based volume */
+  VIR_STORAGE_VOL_NETWORK = 3,  /* Network volumes like RBD (RADOS Block Device) */
 
 #ifdef VIR_ENUM_SENTINELS
     VIR_STORAGE_VOL_LAST
index eef8ce1601e35213495ad38b56df541616aa7d9f..8c4a2fd2a265c040a839eeed3a7a115cc3808aa6 100644 (file)
@@ -73,6 +73,9 @@
 %define with_storage_iscsi 0%{!?_without_storage_iscsi:%{server_drivers}}
 %define with_storage_disk  0%{!?_without_storage_disk:%{server_drivers}}
 %define with_storage_mpath 0%{!?_without_storage_mpath:%{server_drivers}}
+%if 0%{?fedora} >= 16
+%define with_storage_rbd   0%{!?_without_storage_rbd:%{server_drivers}}
+%endif
 %define with_numactl       0%{!?_without_numactl:%{server_drivers}}
 %define with_selinux       0%{!?_without_selinux:%{server_drivers}}
 
 %define with_storage_lvm 0
 %define with_storage_iscsi 0
 %define with_storage_mpath 0
+%define with_storage_rbd 0
 %define with_storage_disk 0
 %endif
 
@@ -407,6 +411,9 @@ BuildRequires: device-mapper
 %else
 BuildRequires: device-mapper-devel
 %endif
+%if %{with_storage_rbd}
+BuildRequires: ceph-devel
+%endif
 %endif
 %if %{with_numactl}
 # For QEMU/LXC numa info
@@ -560,6 +567,10 @@ Requires: device-mapper
 # For multipath support
 Requires: device-mapper
 %endif
+%if %{with_storage_rbd}
+# For RBD support
+Requires: ceph
+%endif
 %if %{with_cgconfig}
 Requires: libcgroup
 %endif
@@ -840,6 +851,10 @@ of recent versions of Linux (and other OSes).
 %define _without_storage_mpath --without-storage-mpath
 %endif
 
+%if ! %{with_storage_rbd}
+%define _without_storage_rbd --without-storage-rbd
+%endif
+
 %if ! %{with_numactl}
 %define _without_numactl --without-numactl
 %endif
@@ -933,6 +948,7 @@ autoreconf -if
            %{?_without_storage_iscsi} \
            %{?_without_storage_disk} \
            %{?_without_storage_mpath} \
+           %{?_without_storage_rbd} \
            %{?_without_numactl} \
            %{?_without_numad} \
            %{?_without_capng} \
index cfa9d44e40aa1ffa2c0d717ae27a5dd77bc13f6d..91216cbb023ba05e977026c01c3d76776da7aa63 100644 (file)
@@ -104,6 +104,7 @@ src/storage/storage_backend_fs.c
 src/storage/storage_backend_iscsi.c
 src/storage/storage_backend_logical.c
 src/storage/storage_backend_mpath.c
+src/storage/storage_backend_rbd.c
 src/storage/storage_backend_scsi.c
 src/storage/storage_driver.c
 src/test/test_driver.c
index 2ecd18873ee96e7ec1aae5c7b75bbd439dbb98a2..e9621c11af5aa1e9e876cb31b8ca282eb3d21d98 100644 (file)
@@ -500,6 +500,9 @@ STORAGE_DRIVER_MPATH_SOURCES =                                      \
 STORAGE_DRIVER_DISK_SOURCES =                                  \
                storage/storage_backend_disk.h storage/storage_backend_disk.c
 
+STORAGE_DRIVER_RBD_SOURCES =                                   \
+               storage/storage_backend_rbd.h storage/storage_backend_rbd.c
+
 STORAGE_HELPER_DISK_SOURCES =                                  \
                storage/parthelper.c
 
@@ -1047,6 +1050,11 @@ if WITH_STORAGE_DISK
 libvirt_driver_storage_la_SOURCES += $(STORAGE_DRIVER_DISK_SOURCES)
 endif
 
+if WITH_STORAGE_RBD
+libvirt_driver_storage_la_SOURCES += $(STORAGE_DRIVER_RBD_SOURCES)
+libvirt_driver_storage_la_LIBADD += $(LIBRBD_LIBS)
+endif
+
 if WITH_NODE_DEVICES
 # Needed to keep automake quiet about conditionals
 if WITH_DRIVER_MODULES
@@ -1146,6 +1154,7 @@ EXTRA_DIST +=                                                     \
                $(STORAGE_DRIVER_SCSI_SOURCES)                  \
                $(STORAGE_DRIVER_MPATH_SOURCES)                 \
                $(STORAGE_DRIVER_DISK_SOURCES)                  \
+               $(STORAGE_DRIVER_RBD_SOURCES)                   \
                $(NODE_DEVICE_DRIVER_SOURCES)                   \
                $(NODE_DEVICE_DRIVER_HAL_SOURCES)               \
                $(NODE_DEVICE_DRIVER_UDEV_SOURCES)              \
index 188af6db6c3701bc555f19544c9585ea84b10c20..bf4567f31190e958bfa2463ef9ca5ca0600bfec2 100644 (file)
@@ -52,7 +52,7 @@ VIR_ENUM_IMPL(virStoragePool,
               VIR_STORAGE_POOL_LAST,
               "dir", "fs", "netfs",
               "logical", "disk", "iscsi",
-              "scsi", "mpath")
+              "scsi", "mpath", "rbd")
 
 VIR_ENUM_IMPL(virStoragePoolFormatFileSystem,
               VIR_STORAGE_POOL_FS_LAST,
@@ -110,6 +110,7 @@ enum {
     VIR_STORAGE_POOL_SOURCE_ADAPTER         = (1<<3),
     VIR_STORAGE_POOL_SOURCE_NAME            = (1<<4),
     VIR_STORAGE_POOL_SOURCE_INITIATOR_IQN   = (1<<5),
+    VIR_STORAGE_POOL_SOURCE_NETWORK         = (1<<6),
 };
 
 
@@ -194,6 +195,17 @@ static virStoragePoolTypeInfo poolTypeInfo[] = {
             .formatToString = virStoragePoolFormatDiskTypeToString,
         }
     },
+    { .poolType = VIR_STORAGE_POOL_RBD,
+      .poolOptions = {
+             .flags = (VIR_STORAGE_POOL_SOURCE_HOST |
+                       VIR_STORAGE_POOL_SOURCE_NETWORK |
+                       VIR_STORAGE_POOL_SOURCE_NAME),
+        },
+       .volOptions = {
+            .defaultFormat = VIR_STORAGE_FILE_RAW,
+            .formatToString = virStoragePoolFormatDiskTypeToString,
+        }
+    },
     { .poolType = VIR_STORAGE_POOL_MPATH,
       .volOptions = {
             .formatToString = virStoragePoolFormatDiskTypeToString,
@@ -297,6 +309,11 @@ virStoragePoolSourceClear(virStoragePoolSourcePtr source)
         VIR_FREE(source->auth.chap.login);
         VIR_FREE(source->auth.chap.passwd);
     }
+
+    if (source->authType == VIR_STORAGE_POOL_AUTH_CEPHX) {
+        VIR_FREE(source->auth.cephx.username);
+        VIR_FREE(source->auth.cephx.secret.usage);
+    }
 }
 
 void
@@ -398,6 +415,34 @@ virStoragePoolDefParseAuthChap(xmlXPathContextPtr ctxt,
     return 0;
 }
 
+/*
+ * Parse the <auth type='ceph'> element of a pool <source>:
+ * the mandatory username attribute plus a <secret> reference,
+ * identified by either its uuid or its usage attribute.
+ * Returns 0 on success, -1 on error.
+ */
+static int
+virStoragePoolDefParseAuthCephx(xmlXPathContextPtr ctxt,
+                                virStoragePoolAuthCephxPtr auth) {
+    char *uuid = NULL;
+    int ret = -1;
+
+    auth->username = virXPathString("string(./auth/@username)", ctxt);
+    if (auth->username == NULL) {
+        virStorageReportError(VIR_ERR_XML_ERROR,
+                              "%s", _("missing auth username attribute"));
+        return -1;
+    }
+
+    uuid = virXPathString("string(./auth/secret/@uuid)", ctxt);
+    auth->secret.usage = virXPathString("string(./auth/secret/@usage)", ctxt);
+    if (uuid == NULL && auth->secret.usage == NULL) {
+        virStorageReportError(VIR_ERR_XML_ERROR, "%s",
+                              _("missing auth secret uuid or usage attribute"));
+        goto cleanup;
+    }
+
+    /* The secret may be referenced by usage alone; in that case uuid is
+     * NULL and must not be handed to virUUIDParse (NULL dereference). */
+    if (uuid != NULL && virUUIDParse(uuid, auth->secret.uuid) < 0) {
+        virStorageReportError(VIR_ERR_XML_ERROR,
+                              "%s", _("invalid auth secret uuid"));
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    VIR_FREE(uuid);  /* virXPathString returns caller-owned memory */
+    return ret;
+}
+
+
 static int
 virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
                              virStoragePoolSourcePtr source,
@@ -419,6 +464,11 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
     }
 
     source->name = virXPathString("string(./name)", ctxt);
+    if (pool_type == VIR_STORAGE_POOL_RBD && source->name == NULL) {
+        virStorageReportError(VIR_ERR_XML_ERROR, "%s",
+                              _("missing mandatory 'name' field for RBD pool name"));
+        goto cleanup;
+    }
 
     if (options->formatFromString) {
         char *format = virXPathString("string(./format/@type)", ctxt);
@@ -501,6 +551,8 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
     } else {
         if (STREQ(authType, "chap")) {
             source->authType = VIR_STORAGE_POOL_AUTH_CHAP;
+        } else if (STREQ(authType, "ceph")) {
+            source->authType = VIR_STORAGE_POOL_AUTH_CEPHX;
         } else {
             virStorageReportError(VIR_ERR_XML_ERROR,
                                   _("unknown auth type '%s'"),
@@ -514,6 +566,11 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
             goto cleanup;
     }
 
+    if (source->authType == VIR_STORAGE_POOL_AUTH_CEPHX) {
+        if (virStoragePoolDefParseAuthCephx(ctxt, &source->auth.cephx) < 0)
+            goto cleanup;
+    }
+
     source->vendor = virXPathString("string(./vendor/@name)", ctxt);
     source->product = virXPathString("string(./product/@name)", ctxt);
 
@@ -741,20 +798,23 @@ virStoragePoolDefParseXML(xmlXPathContextPtr ctxt) {
         }
     }
 
-    if ((tmppath = virXPathString("string(./target/path)", ctxt)) == NULL) {
-        virStorageReportError(VIR_ERR_XML_ERROR,
-                              "%s", _("missing storage pool target path"));
-        goto cleanup;
-    }
-    ret->target.path = virFileSanitizePath(tmppath);
-    VIR_FREE(tmppath);
-    if (!ret->target.path)
-        goto cleanup;
-
+    /* When we are working with a virtual disk we can skip the target
+     * path and permissions */
+    if (!(options->flags & VIR_STORAGE_POOL_SOURCE_NETWORK)) {
+        if ((tmppath = virXPathString("string(./target/path)", ctxt)) == NULL) {
+            virStorageReportError(VIR_ERR_XML_ERROR,
+                                  "%s", _("missing storage pool target path"));
+            goto cleanup;
+        }
+        ret->target.path = virFileSanitizePath(tmppath);
+        VIR_FREE(tmppath);
+        if (!ret->target.path)
+            goto cleanup;
 
-    if (virStorageDefParsePerms(ctxt, &ret->target.perms,
-                                "./target/permissions", 0700) < 0)
-        goto cleanup;
+        if (virStorageDefParsePerms(ctxt, &ret->target.perms,
+                                    "./target/permissions", 0700) < 0)
+            goto cleanup;
+    }
 
     return ret;
 
@@ -822,6 +882,7 @@ virStoragePoolSourceFormat(virBufferPtr buf,
                            virStoragePoolSourcePtr src)
 {
     int i, j;
+    char uuid[VIR_UUID_STRING_BUFLEN];
 
     virBufferAddLit(buf,"  <source>\n");
     if ((options->flags & VIR_STORAGE_POOL_SOURCE_HOST) && src->nhost) {
@@ -885,6 +946,24 @@ virStoragePoolSourceFormat(virBufferPtr buf,
                           src->auth.chap.login,
                           src->auth.chap.passwd);
 
+    if (src->authType == VIR_STORAGE_POOL_AUTH_CEPHX) {
+        virBufferAsprintf(buf,"    <auth username='%s' type='ceph'>\n",
+                          src->auth.cephx.username);
+
+        virBufferAsprintf(buf,"      %s", "<secret");
+        if (src->auth.cephx.secret.uuid != NULL) {
+            virUUIDFormat(src->auth.cephx.secret.uuid, uuid);
+            virBufferAsprintf(buf," uuid='%s'", uuid);
+        }
+
+        if (src->auth.cephx.secret.usage != NULL) {
+            virBufferAsprintf(buf," usage='%s'", src->auth.cephx.secret.usage);
+        }
+        virBufferAsprintf(buf,"%s", "/>\n");
+
+        virBufferAsprintf(buf,"    %s", "</auth>\n");
+    }
+
     if (src->vendor != NULL) {
         virBufferEscapeString(buf,"    <vendor name='%s'/>\n", src->vendor);
     }
@@ -932,25 +1011,29 @@ virStoragePoolDefFormat(virStoragePoolDefPtr def) {
     if (virStoragePoolSourceFormat(&buf, options, &def->source) < 0)
         goto cleanup;
 
-    virBufferAddLit(&buf,"  <target>\n");
+    /* RBD pools are neither local block devices nor files, so they
+     * have no target element */
+    if (def->type != VIR_STORAGE_POOL_RBD) {
+        virBufferAddLit(&buf,"  <target>\n");
 
-    if (def->target.path)
-        virBufferAsprintf(&buf,"    <path>%s</path>\n", def->target.path);
+        if (def->target.path)
+            virBufferAsprintf(&buf,"    <path>%s</path>\n", def->target.path);
 
-    virBufferAddLit(&buf,"    <permissions>\n");
-    virBufferAsprintf(&buf,"      <mode>0%o</mode>\n",
-                      def->target.perms.mode);
-    virBufferAsprintf(&buf,"      <owner>%u</owner>\n",
-                      (unsigned int) def->target.perms.uid);
-    virBufferAsprintf(&buf,"      <group>%u</group>\n",
-                      (unsigned int) def->target.perms.gid);
+        virBufferAddLit(&buf,"    <permissions>\n");
+        virBufferAsprintf(&buf,"      <mode>0%o</mode>\n",
+                          def->target.perms.mode);
+        virBufferAsprintf(&buf,"      <owner>%u</owner>\n",
+                          (unsigned int) def->target.perms.uid);
+        virBufferAsprintf(&buf,"      <group>%u</group>\n",
+                          (unsigned int) def->target.perms.gid);
 
-    if (def->target.perms.label)
-        virBufferAsprintf(&buf,"      <label>%s</label>\n",
-                          def->target.perms.label);
+        if (def->target.perms.label)
+            virBufferAsprintf(&buf,"      <label>%s</label>\n",
+                            def->target.perms.label);
 
-    virBufferAddLit(&buf,"    </permissions>\n");
-    virBufferAddLit(&buf,"  </target>\n");
+        virBufferAddLit(&buf,"    </permissions>\n");
+        virBufferAddLit(&buf,"  </target>\n");
+    }
     virBufferAddLit(&buf,"</pool>\n");
 
     if (virBufferError(&buf))
index 9222c4af36f28116daa0b3edfa260b41d862b6b0..5733b575bb1952cff4ecd2ee1a6a40681943fd2f 100644 (file)
@@ -120,6 +120,7 @@ enum virStoragePoolType {
     VIR_STORAGE_POOL_ISCSI,    /* iSCSI targets */
     VIR_STORAGE_POOL_SCSI,     /* SCSI HBA */
     VIR_STORAGE_POOL_MPATH,    /* Multipath devices */
+    VIR_STORAGE_POOL_RBD,      /* RADOS Block Device */
 
     VIR_STORAGE_POOL_LAST,
 };
@@ -137,6 +138,7 @@ enum virStoragePoolDeviceType {
 enum virStoragePoolAuthType {
     VIR_STORAGE_POOL_AUTH_NONE,
     VIR_STORAGE_POOL_AUTH_CHAP,
+    VIR_STORAGE_POOL_AUTH_CEPHX,
 };
 
 typedef struct _virStoragePoolAuthChap virStoragePoolAuthChap;
@@ -146,6 +148,15 @@ struct _virStoragePoolAuthChap {
     char *passwd;
 };
 
+typedef struct _virStoragePoolAuthCephx virStoragePoolAuthCephx;
+typedef virStoragePoolAuthCephx *virStoragePoolAuthCephxPtr;
+struct _virStoragePoolAuthCephx {
+    char *username;
+    struct {
+            unsigned char uuid[VIR_UUID_BUFLEN];
+            char *usage;
+    } secret;
+};
 
 /*
  * For remote pools, info on how to reach the host
@@ -235,6 +246,7 @@ struct _virStoragePoolSource {
     int authType;       /* virStoragePoolAuthType */
     union {
         virStoragePoolAuthChap chap;
+        virStoragePoolAuthCephx cephx;
     } auth;
 
     /* Vendor of the source */
index caac2f8d93f69088b9b409083534e391c0892c2d..e2e9b516f761466a555115a31fd9496e20b97e18 100644 (file)
@@ -77,6 +77,9 @@
 #if WITH_STORAGE_DIR
 # include "storage_backend_fs.h"
 #endif
+#if WITH_STORAGE_RBD
+# include "storage_backend_rbd.h"
+#endif
 
 #define VIR_FROM_THIS VIR_FROM_STORAGE
 
@@ -102,6 +105,9 @@ static virStorageBackendPtr backends[] = {
 #endif
 #if WITH_STORAGE_DISK
     &virStorageBackendDisk,
+#endif
+#if WITH_STORAGE_RBD
+    &virStorageBackendRBD,
 #endif
     NULL
 };
diff --git a/src/storage/storage_backend_rbd.c b/src/storage/storage_backend_rbd.c
new file mode 100644 (file)
index 0000000..3e7464c
--- /dev/null
@@ -0,0 +1,546 @@
+/*
+ * storage_backend_rbd.c: storage backend for RBD (RADOS Block Device) handling
+ *
+ * Copyright (C) 2012 Wido den Hollander
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+ *
+ * Author: Wido den Hollander <wido@widodh.nl>
+ */
+
+#include <config.h>
+
+#include "virterror_internal.h"
+#include "storage_backend_rbd.h"
+#include "storage_conf.h"
+#include "util.h"
+#include "memory.h"
+#include "logging.h"
+#include "base64.h"
+#include "uuid.h"
+#include "rados/librados.h"
+#include "rbd/librbd.h"
+
+#define VIR_FROM_THIS VIR_FROM_STORAGE
+
+/* Per-call connection state for the RBD backend: the RADOS cluster
+ * handle, the pool I/O context and the time the connection was opened
+ * (used only for a debug message when closing). */
+struct _virStorageBackendRBDState {
+    rados_t cluster;
+    rados_ioctx_t ioctx;
+    time_t starttime;
+};
+
+typedef struct _virStorageBackendRBDState virStorageBackendRBDState;
+/* NOTE(review): despite the "Ptr" suffix this typedef aliases the plain
+ * struct, not a pointer to it, so several helpers below receive the
+ * state by value. */
+typedef virStorageBackendRBDState virStorageBackendRBDStatePtr;
+
+/*
+ * virStorageBackendRBDOpenRADOSConn: open a librados connection for a pool.
+ *
+ * When the pool definition carries a cephx username, the matching libvirt
+ * secret is looked up (by UUID or by usage string), base64-encoded and
+ * handed to librados via the "key" option; otherwise authentication is
+ * disabled ("auth_supported" = "none").  All monitor hosts from the pool
+ * XML are joined into a single comma-separated "mon_host" value, using
+ * port 6789 when none was configured.
+ *
+ * Returns 0 on success, -1 on error.  On success the caller must release
+ * ptr->cluster with virStorageBackendRBDCloseRADOSConn().
+ */
+static int virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr *ptr,
+                                             virConnectPtr conn,
+                                             virStoragePoolObjPtr pool)
+{
+    int ret = -1;
+    unsigned char *secret_value = NULL;
+    size_t secret_value_size;
+    char *rados_key = NULL;
+    virBuffer mon_host = VIR_BUFFER_INITIALIZER;
+    virSecretPtr secret = NULL;
+    char secretUuid[VIR_UUID_STRING_BUFLEN];
+    int i;
+    char *mon_buff = NULL;
+
+    /* NOTE(review): username may still be NULL at this point (it is only
+     * checked just below); passing NULL to a %s format is not portable. */
+    VIR_DEBUG("Found Cephx username: %s",
+              pool->def->source.auth.cephx.username);
+
+    if (pool->def->source.auth.cephx.username != NULL) {
+        VIR_DEBUG("Using cephx authorization");
+        /* Create the cluster handle bound to the configured client id. */
+        if (rados_create(&ptr->cluster,
+            pool->def->source.auth.cephx.username) < 0) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to initialize RADOS"));
+            goto cleanup;
+        }
+
+        /* NOTE(review): secret.uuid is a fixed-size array, so comparing
+         * it against NULL is always true -- the intent is presumably
+         * "was a UUID provided"; verify how the XML parser marks an
+         * absent UUID. */
+        if (pool->def->source.auth.cephx.secret.uuid != NULL) {
+            virUUIDFormat(pool->def->source.auth.cephx.secret.uuid, secretUuid);
+            VIR_DEBUG("Looking up secret by UUID: %s", secretUuid);
+            secret = virSecretLookupByUUIDString(conn, secretUuid);
+        }
+
+        /* A usage lookup, when configured, overwrites any UUID lookup
+         * result above. */
+        if (pool->def->source.auth.cephx.secret.usage != NULL) {
+            VIR_DEBUG("Looking up secret by usage: %s",
+                      pool->def->source.auth.cephx.secret.usage);
+            secret = virSecretLookupByUsage(conn, VIR_SECRET_USAGE_TYPE_CEPH,
+                                            pool->def->source.auth.cephx.secret.usage);
+        }
+
+        if (secret == NULL) {
+            virStorageReportError(VIR_ERR_NO_SECRET,
+                                  _("failed to find the secret"));
+            goto cleanup;
+        }
+
+        /* NOTE(review): secret_value is not checked for NULL before use;
+         * virSecretGetValue can fail. */
+        secret_value = virSecretGetValue(secret, &secret_value_size, 0);
+        base64_encode_alloc((char *)secret_value,
+                            secret_value_size, &rados_key);
+        memset(secret_value, 0, secret_value_size);
+
+        if (rados_key == NULL) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to decode the RADOS key"));
+            goto cleanup;
+        }
+
+        /* NOTE(review): this logs the base64-encoded secret key at debug
+         * level -- consider whether that is acceptable. */
+        VIR_DEBUG("Found cephx key: %s", rados_key);
+        if (rados_conf_set(ptr->cluster, "key", rados_key) < 0) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to set RADOS option: %s"),
+                                  "rados_key");
+            goto cleanup;
+        }
+
+        /* Scrub the local copy now that librados holds the key. */
+        memset(rados_key, 0, strlen(rados_key));
+
+        if (rados_conf_set(ptr->cluster, "auth_supported", "cephx") < 0) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to set RADOS option: %s"),
+                                  "auth_supported");
+            goto cleanup;
+        }
+    } else {
+        VIR_DEBUG("Not using cephx authorization");
+        if (rados_create(&ptr->cluster, NULL) < 0) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to create the RADOS cluster"));
+            goto cleanup;
+        }
+        if (rados_conf_set(ptr->cluster, "auth_supported", "none") < 0) {
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("failed to set RADOS option: %s"),
+                                  "auth_supported");
+            goto cleanup;
+        }
+    }
+
+    VIR_DEBUG("Found %zu RADOS cluster monitors in the pool configuration",
+              pool->def->source.nhost);
+
+    /* Join all monitors into one comma-separated list; 6789 is the
+     * default Ceph monitor port when the XML omitted one. */
+    for (i = 0; i < pool->def->source.nhost; i++) {
+        if (pool->def->source.hosts[i].name != NULL &&
+            !pool->def->source.hosts[i].port) {
+            virBufferAsprintf(&mon_host, "%s:6789,",
+                              pool->def->source.hosts[i].name);
+        } else if (pool->def->source.hosts[i].name != NULL &&
+            pool->def->source.hosts[i].port) {
+            virBufferAsprintf(&mon_host, "%s:%d,",
+                              pool->def->source.hosts[i].name,
+                              pool->def->source.hosts[i].port);
+        } else {
+            /* NOTE(review): the error is reported but the loop continues
+             * and the function does not fail on this path. */
+            virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                                  _("received malformed monitor, check the XML definition"));
+        }
+    }
+
+    if (virBufferError(&mon_host)) {
+       virReportOOMError();
+       goto cleanup;
+    }
+
+    mon_buff = virBufferContentAndReset(&mon_host);
+    VIR_DEBUG("RADOS mon_host has been set to: %s", mon_buff);
+    if (rados_conf_set(ptr->cluster, "mon_host", mon_buff) < 0) {
+       virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                             _("failed to set RADOS option: %s"),
+                             "mon_host");
+        goto cleanup;
+    }
+
+    /* Remember when the connection was opened so the close helper can
+     * log the connection lifetime. */
+    ptr->starttime = time(0);
+    if (rados_connect(ptr->cluster) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to connect to the RADOS monitor on: %s"),
+                              mon_buff);
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    VIR_FREE(secret_value);
+    VIR_FREE(rados_key);
+    virSecretFree(secret);
+    virBufferFreeAndReset(&mon_host);
+    VIR_FREE(mon_buff);
+    return ret;
+}
+
+/*
+ * virStorageBackendRBDCloseRADOSConn: tear down the ioctx/cluster pair
+ * opened by virStorageBackendRBDOpenRADOSConn().
+ *
+ * NOTE(review): the state is received BY VALUE (the "Ptr" typedef is a
+ * plain struct alias), so the NULL assignments below only reset the
+ * local copy, never the caller's fields.  Also, ret is set to -1/-2 on
+ * the paths where a handle was successfully closed, so callers must not
+ * interpret a negative return as failure.
+ */
+static int virStorageBackendRBDCloseRADOSConn(virStorageBackendRBDStatePtr ptr)
+{
+    int ret = 0;
+
+    if (ptr.ioctx != NULL) {
+        VIR_DEBUG("Closing RADOS IoCTX");
+        rados_ioctx_destroy(ptr.ioctx);
+        ret = -1;
+    }
+    ptr.ioctx = NULL;
+
+    if (ptr.cluster != NULL) {
+        VIR_DEBUG("Closing RADOS connection");
+        rados_shutdown(ptr.cluster);
+        ret = -2;
+    }
+    ptr.cluster = NULL;
+
+    /* starttime was recorded by the open helper; log the lifetime. */
+    time_t runtime = time(0) - ptr.starttime;
+    VIR_DEBUG("RADOS connection existed for %ld seconds", runtime);
+
+    return ret;
+}
+
+/*
+ * volStorageBackendRBDRefreshVolInfo: stat a single RBD image and fill
+ * the libvirt volume definition from it.
+ *
+ * capacity is the image size; allocation is obj_size * num_objs, i.e.
+ * the maximum space the image could occupy, not its actual usage.
+ * target.path becomes the qemu-style "rbd:<pool>/<image>" URI and key
+ * becomes "<pool>/<image>".
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int volStorageBackendRBDRefreshVolInfo(virStorageVolDefPtr vol,
+                                              virStoragePoolObjPtr pool,
+                                              virStorageBackendRBDStatePtr ptr)
+{
+    int ret = -1;
+    rbd_image_t image;
+    if (rbd_open(ptr.ioctx, vol->name, &image, NULL) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to open the RBD image '%s'"),
+                              vol->name);
+        return ret;
+    }
+
+    rbd_image_info_t info;
+    if (rbd_stat(image, &info, sizeof(info)) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                              _("failed to stat the RBD image"));
+        goto cleanup;
+    }
+
+    VIR_DEBUG("Refreshed RBD image %s/%s (size: %llu obj_size: %llu num_objs: %llu)",
+              pool->def->source.name, vol->name, (unsigned long long)info.size,
+              (unsigned long long)info.obj_size,
+              (unsigned long long)info.num_objs);
+
+    vol->capacity = info.size;
+    vol->allocation = info.obj_size * info.num_objs;
+    vol->type = VIR_STORAGE_VOL_NETWORK;
+
+    /* Rebuild path and key from scratch; drop any stale values first. */
+    VIR_FREE(vol->target.path);
+    if (virAsprintf(&vol->target.path, "rbd:%s/%s",
+                    pool->def->source.name,
+                    vol->name) == -1) {
+        virReportOOMError();
+        goto cleanup;
+    }
+
+    VIR_FREE(vol->key);
+    if (virAsprintf(&vol->key, "%s/%s",
+                    pool->def->source.name,
+                    vol->name) == -1) {
+        virReportOOMError();
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    rbd_close(image);
+    return ret;
+}
+
+/*
+ * virStorageBackendRBDRefreshPool: refresh pool statistics and volumes.
+ *
+ * Connects to the cluster, derives capacity/available from the RADOS
+ * cluster stats (reported in kb) and allocation from the RADOS pool
+ * stats, then lists every RBD image (rbd_list() yields a NUL-separated
+ * name list) and registers each one as a volume.
+ *
+ * NOTE(review): conn is annotated ATTRIBUTE_UNUSED yet is passed to the
+ * open helper below.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int virStorageBackendRBDRefreshPool(virConnectPtr conn ATTRIBUTE_UNUSED,
+                                           virStoragePoolObjPtr pool)
+{
+    size_t max_size = 1024;
+    int ret = -1;
+    int len = -1;
+    int i;
+    char *name, *names = NULL;
+    virStorageBackendRBDStatePtr ptr;
+    ptr.cluster = NULL;
+    ptr.ioctx = NULL;
+
+    if (virStorageBackendRBDOpenRADOSConn(&ptr, conn, pool) < 0) {
+        goto cleanup;
+    }
+
+    if (rados_ioctx_create(ptr.cluster,
+        pool->def->source.name, &ptr.ioctx) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+                              pool->def->source.name);
+        goto cleanup;
+    }
+
+    struct rados_cluster_stat_t stat;
+    if (rados_cluster_stat(ptr.cluster, &stat) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                              _("failed to stat the RADOS cluster"));
+        goto cleanup;
+    }
+
+    struct rados_pool_stat_t poolstat;
+    if (rados_ioctx_pool_stat(ptr.ioctx, &poolstat) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to stat the RADOS pool '%s'"),
+                              pool->def->source.name);
+        goto cleanup;
+    }
+
+    /* Cluster stats are in kb; pool allocation is already in bytes. */
+    pool->def->capacity = stat.kb * 1024;
+    pool->def->available = stat.kb_avail * 1024;
+    pool->def->allocation = poolstat.num_bytes;
+
+    VIR_DEBUG("Utilization of RBD pool %s: (kb: %llu kb_avail: %llu num_bytes: %llu)",
+              pool->def->source.name, (unsigned long long)stat.kb,
+              (unsigned long long)stat.kb_avail,
+              (unsigned long long)poolstat.num_bytes);
+
+    /* rbd_list() returns -ERANGE when the buffer is too small.
+     * NOTE(review): on that path the previous buffer is not freed and
+     * max_size is never increased, so this can retry forever while
+     * leaking -- confirm against the librbd rbd_list() contract. */
+    while (true) {
+        if (VIR_ALLOC_N(names, max_size) < 0)
+            goto out_of_memory;
+
+        len = rbd_list(ptr.ioctx, names, &max_size);
+        if (len >= 0)
+            break;
+        if (len != -ERANGE) {
+            VIR_WARN("%s", _("A problem occured while listing RBD images"));
+            goto cleanup;
+        }
+    }
+
+    /* Walk the NUL-separated name list; an empty name marks the end.
+     * NOTE(review): 'vol' leaks when the empty-name break fires or when
+     * the refresh below fails. */
+    for (i = 0, name = names; name < names + max_size; i++) {
+        if (VIR_REALLOC_N(pool->volumes.objs, pool->volumes.count + 1) < 0) {
+            virStoragePoolObjClearVols(pool);
+            goto out_of_memory;
+        }
+
+        virStorageVolDefPtr vol;
+        if (VIR_ALLOC(vol) < 0)
+            goto out_of_memory;
+
+        vol->name = strdup(name);
+        if (vol->name == NULL)
+            goto out_of_memory;
+
+        if (STREQ(vol->name, ""))
+            break;
+
+        name += strlen(name) + 1;
+
+        if (volStorageBackendRBDRefreshVolInfo(vol, pool, ptr) < 0)
+            goto cleanup;
+
+        pool->volumes.objs[pool->volumes.count++] = vol;
+    }
+
+    VIR_DEBUG("Found %d images in RBD pool %s",
+              pool->volumes.count, pool->def->source.name);
+
+    ret = 0;
+
+cleanup:
+    VIR_FREE(names);
+    virStorageBackendRBDCloseRADOSConn(ptr);
+    return ret;
+
+out_of_memory:
+    virReportOOMError();
+    goto cleanup;
+}
+
+/*
+ * virStorageBackendRBDDeleteVol: remove one RBD image from the pool.
+ *
+ * VIR_STORAGE_VOL_DELETE_ZEROED is not supported and only triggers a
+ * warning.  NOTE(review): other flag bits are not rejected via
+ * virCheckFlags; also note the grammar slip in the translatable warning
+ * string ("does not supported"), deliberately left untouched here.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int virStorageBackendRBDDeleteVol(virConnectPtr conn,
+                                         virStoragePoolObjPtr pool,
+                                         virStorageVolDefPtr vol,
+                                         unsigned int flags)
+{
+    int ret = -1;
+    virStorageBackendRBDStatePtr ptr;
+    ptr.cluster = NULL;
+    ptr.ioctx = NULL;
+
+    VIR_DEBUG("Removing RBD image %s/%s", pool->def->source.name, vol->name);
+
+    if (flags & VIR_STORAGE_VOL_DELETE_ZEROED) {
+        VIR_WARN("%s", _("This storage backend does not supported zeroed removal of volumes"));
+    }
+
+    if (virStorageBackendRBDOpenRADOSConn(&ptr, conn, pool) < 0) {
+        goto cleanup;
+    }
+
+    if (rados_ioctx_create(ptr.cluster,
+        pool->def->source.name, &ptr.ioctx) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+                              pool->def->source.name);
+        goto cleanup;
+    }
+
+    if (rbd_remove(ptr.ioctx, vol->name) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to remove volume '%s/%s'"),
+                              pool->def->source.name,
+                              vol->name);
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    virStorageBackendRBDCloseRADOSConn(ptr);
+    return ret;
+}
+
+/*
+ * virStorageBackendRBDCreateVol: create a new RBD image of
+ * vol->capacity bytes in the pool, then refresh the volume definition
+ * from the freshly created image.
+ *
+ * Encrypted volumes are rejected.  NOTE(review): the encryption check
+ * could happen before the cluster connection is opened, avoiding a
+ * pointless round trip.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int virStorageBackendRBDCreateVol(virConnectPtr conn,
+                                         virStoragePoolObjPtr pool,
+                                         virStorageVolDefPtr vol)
+{
+    virStorageBackendRBDStatePtr ptr;
+    ptr.cluster = NULL;
+    ptr.ioctx = NULL;
+    int order = 0;
+    int ret = -1;
+
+    VIR_DEBUG("Creating RBD image %s/%s with size %llu",
+              pool->def->source.name,
+              vol->name, vol->capacity);
+
+    if (virStorageBackendRBDOpenRADOSConn(&ptr, conn, pool) < 0) {
+        goto cleanup;
+    }
+
+    if (rados_ioctx_create(ptr.cluster,
+        pool->def->source.name,&ptr.ioctx) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                               _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+                               pool->def->source.name);
+        goto cleanup;
+    }
+
+    if (vol->target.encryption != NULL) {
+        virStorageReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                              _("storage pool does not support encrypted volumes"));
+        goto cleanup;
+    }
+
+    /* order==0 lets librbd pick the default object size for the image. */
+    if (rbd_create(ptr.ioctx, vol->name, vol->capacity, &order) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                              _("failed to create volume '%s/%s'"),
+                              pool->def->source.name,
+                              vol->name);
+        goto cleanup;
+    }
+
+    if (volStorageBackendRBDRefreshVolInfo(vol, pool, ptr) < 0) {
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    virStorageBackendRBDCloseRADOSConn(ptr);
+    return ret;
+}
+
+/*
+ * virStorageBackendRBDRefreshVol: re-read size/allocation/path for one
+ * existing volume.
+ *
+ * NOTE(review): pool is annotated ATTRIBUTE_UNUSED but IS dereferenced
+ * below (ioctx creation and the refresh helper) -- the annotation is
+ * wrong.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int virStorageBackendRBDRefreshVol(virConnectPtr conn,
+                                          virStoragePoolObjPtr pool ATTRIBUTE_UNUSED,
+                                          virStorageVolDefPtr vol)
+{
+    virStorageBackendRBDStatePtr ptr;
+    ptr.cluster = NULL;
+    ptr.ioctx = NULL;
+    int ret = -1;
+
+    if (virStorageBackendRBDOpenRADOSConn(&ptr, conn, pool) < 0) {
+        goto cleanup;
+    }
+
+    if (rados_ioctx_create(ptr.cluster,
+        pool->def->source.name, &ptr.ioctx) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                               _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+                               pool->def->source.name);
+        goto cleanup;
+    }
+
+    if (volStorageBackendRBDRefreshVolInfo(vol, pool, ptr) < 0) {
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    virStorageBackendRBDCloseRADOSConn(ptr);
+    return ret;
+}
+
+/*
+ * virStorageBackendRBDResizeVol: resize an existing RBD image to
+ * 'capacity' bytes via rbd_resize().
+ *
+ * No flags are supported (virCheckFlags rejects any set bit).
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int virStorageBackendRBDResizeVol(virConnectPtr conn ATTRIBUTE_UNUSED,
+                                     virStoragePoolObjPtr pool ATTRIBUTE_UNUSED,
+                                     virStorageVolDefPtr vol,
+                                     unsigned long long capacity,
+                                     unsigned int flags)
+{
+    virStorageBackendRBDStatePtr ptr;
+    ptr.cluster = NULL;
+    ptr.ioctx = NULL;
+    rbd_image_t image = NULL;
+    int ret = -1;
+
+    virCheckFlags(0, -1);
+
+    /* NOTE(review): both conn and pool carry ATTRIBUTE_UNUSED yet are
+     * used below -- the annotations are wrong. */
+    if (virStorageBackendRBDOpenRADOSConn(&ptr, conn, pool) < 0) {
+        goto cleanup;
+    }
+
+    if (rados_ioctx_create(ptr.cluster,
+        pool->def->source.name, &ptr.ioctx) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                               _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+                               pool->def->source.name);
+        goto cleanup;
+    }
+
+    if (rbd_open(ptr.ioctx, vol->name, &image, NULL) < 0) {
+       virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                            _("failed to open the RBD image '%s'"),
+                            vol->name);
+       goto cleanup;
+    }
+
+    if (rbd_resize(image, capacity) < 0) {
+        virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+                            _("failed to resize the RBD image '%s'"),
+                            vol->name);
+        goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    if (image != NULL)
+       rbd_close(image);
+    virStorageBackendRBDCloseRADOSConn(ptr);
+    return ret;
+}
+
+/* Driver table registered in storage_backend.c's backends[] list.
+ * Only refresh/create/refreshVol/delete/resize handlers are provided;
+ * no buildPool/deletePool handlers appear here. */
+virStorageBackend virStorageBackendRBD = {
+    .type = VIR_STORAGE_POOL_RBD,
+
+    .refreshPool = virStorageBackendRBDRefreshPool,
+    .createVol = virStorageBackendRBDCreateVol,
+    .refreshVol = virStorageBackendRBDRefreshVol,
+    .deleteVol = virStorageBackendRBDDeleteVol,
+    .resizeVol = virStorageBackendRBDResizeVol,
+};
diff --git a/src/storage/storage_backend_rbd.h b/src/storage/storage_backend_rbd.h
new file mode 100644 (file)
index 0000000..2ae2513
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * storage_backend_rbd.h: storage backend for RBD (RADOS Block Device) handling
+ *
+ * Copyright (C) 2012 Wido den Hollander
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+ *
+ * Author: Wido den Hollander <wido@widodh.nl>
+ */
+
+#ifndef __VIR_STORAGE_BACKEND_RBD_H__
+# define __VIR_STORAGE_BACKEND_RBD_H__
+
+# include "storage_backend.h"
+
+/* Driver table implemented in storage_backend_rbd.c. */
+extern virStorageBackend virStorageBackendRBD;
+
+#endif /* __VIR_STORAGE_BACKEND_RBD_H__ */
diff --git a/tests/storagepoolxml2xmlin/pool-rbd.xml b/tests/storagepoolxml2xmlin/pool-rbd.xml
new file mode 100644 (file)
index 0000000..4ee8d56
--- /dev/null
@@ -0,0 +1,11 @@
+<pool type='rbd'>
+  <name>ceph</name>
+  <source>
+    <name>rbd</name>
+    <host name='localhost' port='6789'/>
+    <host name='localhost' port='6790'/>
+    <auth username='admin' type='ceph'>
+       <secret uuid='2ec115d7-3a88-3ceb-bc12-0ac909a6fd87'/>
+    </auth>
+  </source>
+</pool>
diff --git a/tests/storagepoolxml2xmlout/pool-rbd.xml b/tests/storagepoolxml2xmlout/pool-rbd.xml
new file mode 100644 (file)
index 0000000..309a6d9
--- /dev/null
@@ -0,0 +1,15 @@
+<pool type='rbd'>
+  <name>ceph</name>
+  <uuid>47c1faee-0207-e741-f5ae-d9b019b98fe2</uuid>
+  <capacity unit='bytes'>0</capacity>
+  <allocation unit='bytes'>0</allocation>
+  <available unit='bytes'>0</available>
+  <source>
+    <name>rbd</name>
+    <host name='localhost' port='6789'/>
+    <host name='localhost' port='6790'/>
+    <auth username='admin' type='ceph'>
+      <secret uuid='2ec115d7-3a88-3ceb-bc12-0ac909a6fd87'/>
+    </auth>
+  </source>
+</pool>
index 46239fa027f6421916710998d632933083555dcb..ffe6ed2b3c4cf24befaccacb15960d87cceea137 100644 (file)
@@ -12203,6 +12203,10 @@ cmdVolInfo(vshControl *ctl, const vshCmd *cmd)
             vshPrint(ctl, "%-15s %s\n", _("Type:"), _("dir"));
             break;
 
+        case VIR_STORAGE_VOL_NETWORK:
+            vshPrint(ctl, "%-15s %s\n", _("Type:"), _("network"));
+            break;
+
         default:
             vshPrint(ctl, "%-15s %s\n", _("Type:"), _("unknown"));
         }
@@ -20137,6 +20141,9 @@ vshShowVersion(vshControl *ctl ATTRIBUTE_UNUSED)
 #endif
 #ifdef WITH_STORAGE_LVM
     vshPrint(ctl, " LVM");
+#endif
+#ifdef WITH_STORAGE_RBD
+    vshPrint(ctl, " RBD");
 #endif
     vshPrint(ctl, "\n");