From 97e3f0aa013dad49de1a55f8025327de7e801822 Mon Sep 17 00:00:00 2001
From: Jason Dillaman <dillaman@redhat.com>
Date: Wed, 20 Jun 2018 22:20:14 -0400
Subject: [PATCH 5/5] qa/workunits/rados: test pool op permissions
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
---
qa/workunits/rados/test_pool_access.sh | 99 ++++++++++++++++++++++++--
1 file changed, 92 insertions(+), 7 deletions(-)
diff --git a/qa/workunits/rados/test_pool_access.sh b/qa/workunits/rados/test_pool_access.sh
index 8597b71..947b57e 100755
--- a/qa/workunits/rados/test_pool_access.sh
+++ b/qa/workunits/rados/test_pool_access.sh
@@ -2,22 +2,107 @@
set -e
-expect_1()
+KEYRING=$(mktemp)
+trap cleanup EXIT ERR HUP INT QUIT
+
+cleanup() {
+ (ceph auth del client.mon_read || true) >/dev/null 2>&1
+ (ceph auth del client.mon_write || true) >/dev/null 2>&1
+
+ rm -f $KEYRING
+}
+
+expect_false()
{
- set -x
- set +e
- "$@"
- if [ $? == 1 ]; then return 0; else return 1; fi
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+create_pool_op() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+cluster.create_pool("${POOL}")
+EOF
}
+delete_pool_op() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+cluster.delete_pool("${POOL}")
+EOF
+}
+
+create_pool_snap_op() {
+ ID=$1
+ POOL=$2
+ SNAP=$3
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+ioctx = cluster.open_ioctx("${POOL}")
+
+ioctx.create_snap("${SNAP}")
+EOF
+}
+
+remove_pool_snap_op() {
+ ID=$1
+ POOL=$2
+ SNAP=$3
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+ioctx = cluster.open_ioctx("${POOL}")
+
+ioctx.remove_snap("${SNAP}")
+EOF
+}
+
+test_pool_op()
+{
+ ceph auth get-or-create client.mon_read mon 'allow r' >> $KEYRING
+ ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
+
+ expect_false create_pool_op mon_read pool1
+ create_pool_op mon_write pool1
+
+ expect_false create_pool_snap_op mon_read pool1 snap1
+ create_pool_snap_op mon_write pool1 snap1
+
+ expect_false remove_pool_snap_op mon_read pool1 snap1
+ remove_pool_snap_op mon_write pool1 snap1
+
+ expect_false delete_pool_op mon_read pool1
+ delete_pool_op mon_write pool1
+}
key=`ceph auth get-or-create-key client.poolaccess1 mon 'allow r' osd 'allow *'`
rados --id poolaccess1 --key $key -p rbd ls
key=`ceph auth get-or-create-key client.poolaccess2 mon 'allow r' osd 'allow * pool=nopool'`
-expect_1 rados --id poolaccess2 --key $key -p rbd ls
+expect_false rados --id poolaccess2 --key $key -p rbd ls
key=`ceph auth get-or-create-key client.poolaccess3 mon 'allow r' osd 'allow rw pool=nopool'`
-expect_1 rados --id poolaccess3 --key $key -p rbd ls
+expect_false rados --id poolaccess3 --key $key -p rbd ls
+
+test_pool_op
echo OK
From b12dd0bf419ae834abb31c712830fa9c4b5cda9c Mon Sep 17 00:00:00 2001
From: Jason Dillaman <dillaman@redhat.com>
Date: Tue, 5 Jun 2018 15:40:44 -0400
Subject: [PATCH 4/5] qa/workunits/rbd: test self-managed snapshot
create/remove permissions
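The new checks are exercised against a matrix of users whose OSD caps range
from none to pool-restricted profiles. As a quick illustration (a sketch only;
the client and pool names mirror the workunit below, and the keyring is passed
via CEPH_ARGS as it is there), client.snap_pool holds osd 'allow w pool=images',
so snapshot creation succeeds in "images" but is denied in "volumes":

    import rados

    # client.snap_pool: mon 'allow r', osd 'allow w pool=images'
    cluster = rados.Rados(conffile="", rados_id="snap_pool")
    cluster.connect()
    cluster.open_ioctx("images").create_self_managed_snap()   # permitted
    cluster.open_ioctx("volumes").create_self_managed_snap()  # raises rados.Error (EPERM)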
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
---
qa/workunits/rbd/permissions.sh | 92 +++++++++++++++++++++++++++++++++
1 file changed, 92 insertions(+)
diff --git a/qa/workunits/rbd/permissions.sh b/qa/workunits/rbd/permissions.sh
index a435a67..1a4295b 100755
--- a/qa/workunits/rbd/permissions.sh
+++ b/qa/workunits/rbd/permissions.sh
@@ -23,11 +23,27 @@ recreate_pools() {
delete_users() {
(ceph auth del client.volumes || true) >/dev/null 2>&1
(ceph auth del client.images || true) >/dev/null 2>&1
+
+ (ceph auth del client.snap_none || true) >/dev/null 2>&1
+ (ceph auth del client.snap_all || true) >/dev/null 2>&1
+ (ceph auth del client.snap_pool || true) >/dev/null 2>&1
+ (ceph auth del client.snap_profile_all || true) >/dev/null 2>&1
+ (ceph auth del client.snap_profile_pool || true) >/dev/null 2>&1
+
+ (ceph auth del client.mon_write || true) >/dev/null 2>&1
}
create_users() {
ceph auth get-or-create client.volumes mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow r class-read pool images, allow rwx pool volumes' >> $KEYRING
ceph auth get-or-create client.images mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool images' >> $KEYRING
+
+ ceph auth get-or-create client.snap_none mon 'allow r' >> $KEYRING
+ ceph auth get-or-create client.snap_all mon 'allow r' osd 'allow w' >> $KEYRING
+ ceph auth get-or-create client.snap_pool mon 'allow r' osd 'allow w pool=images' >> $KEYRING
+ ceph auth get-or-create client.snap_profile_all mon 'allow r' osd 'profile rbd' >> $KEYRING
+ ceph auth get-or-create client.snap_profile_pool mon 'allow r' osd 'profile rbd pool=images' >> $KEYRING
+
+ ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
}
expect() {
@@ -126,9 +142,83 @@ test_volumes_access() {
rbd -k $KEYRING --id volumes rm volumes/child
}
+create_self_managed_snapshot() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+ioctx = cluster.open_ioctx("${POOL}")
+
+snap_id = ioctx.create_self_managed_snap()
+print ("Created snap id {}".format(snap_id))
+EOF
+}
+
+remove_self_managed_snapshot() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster1 = rados.Rados(conffile="", rados_id="mon_write")
+cluster1.connect()
+ioctx1 = cluster1.open_ioctx("${POOL}")
+
+snap_id = ioctx1.create_self_managed_snap()
+print ("Created snap id {}".format(snap_id))
+
+cluster2 = rados.Rados(conffile="", rados_id="${ID}")
+cluster2.connect()
+ioctx2 = cluster2.open_ioctx("${POOL}")
+
+ioctx2.remove_self_managed_snap(snap_id)
+print ("Removed snap id {}".format(snap_id))
+EOF
+}
+
+test_remove_self_managed_snapshots() {
+ # Ensure users cannot create self-managed snapshots w/o permissions
+ expect 1 create_self_managed_snapshot snap_none images
+ expect 1 create_self_managed_snapshot snap_none volumes
+
+ create_self_managed_snapshot snap_all images
+ create_self_managed_snapshot snap_all volumes
+
+ create_self_managed_snapshot snap_pool images
+ expect 1 create_self_managed_snapshot snap_pool volumes
+
+ create_self_managed_snapshot snap_profile_all images
+ create_self_managed_snapshot snap_profile_all volumes
+
+ create_self_managed_snapshot snap_profile_pool images
+ expect 1 create_self_managed_snapshot snap_profile_pool volumes
+
+ # Ensure users cannot delete self-managed snapshots w/o permissions
+ expect 1 remove_self_managed_snapshot snap_none images
+ expect 1 remove_self_managed_snapshot snap_none volumes
+
+ remove_self_managed_snapshot snap_all images
+ remove_self_managed_snapshot snap_all volumes
+
+ remove_self_managed_snapshot snap_pool images
+ expect 1 remove_self_managed_snapshot snap_pool volumes
+
+ remove_self_managed_snapshot snap_profile_all images
+ remove_self_managed_snapshot snap_profile_all volumes
+
+ remove_self_managed_snapshot snap_profile_pool images
+ expect 1 remove_self_managed_snapshot snap_profile_pool volumes
+}
+
cleanup() {
rm -f $KEYRING
}
+
KEYRING=$(mktemp)
trap cleanup EXIT ERR HUP INT QUIT
@@ -141,6 +231,8 @@ test_images_access
recreate_pools
test_volumes_access
+test_remove_self_managed_snapshots
+
delete_pools
delete_users
From 4972e054b32c8200600f27564d50d443e683153e Mon Sep 17 00:00:00 2001
From: Jason Dillaman <dillaman@redhat.com>
Date: Tue, 5 Jun 2018 13:24:48 -0400
Subject: [PATCH 1/5] mon/OSDMonitor: enforce caps when creating/deleting
unmanaged snapshots
The entity will require write access to the OSD service or permission
for the synthetic "osd pool op unmanaged-snap" command.
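For example (a minimal sketch using the Python binding methods added later in
this series; the client name, caps, and pool mirror the qa workunits, with the
keyring supplied via CEPH_ARGS as in those scripts), a client holding only
mon 'allow r' is now refused:

    import rados

    # client.snap_none holds only: mon 'allow r' -- no OSD write caps
    cluster = rados.Rados(conffile="", rados_id="snap_none")
    cluster.connect()
    ioctx = cluster.open_ioctx("images")
    try:
        ioctx.create_self_managed_snap()
    except rados.Error:
        print("denied with EPERM, as the new cap check intends")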
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
---
src/mon/OSDMonitor.cc | 131 +++++++++++++++++++++++++++++++++++++++++-
src/mon/OSDMonitor.h | 1 +
2 files changed, 130 insertions(+), 2 deletions(-)
From 61c06200fe59996bba2bb65fc402207bc10fd459 Mon Sep 17 00:00:00 2001
From: Jason Dillaman <dillaman@redhat.com>
Date: Wed, 20 Jun 2018 21:30:47 -0400
Subject: [PATCH 2/5] mon/OSDMonitor: enforce caps for all remaining pool ops
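The MON_CAP_W check that previously lived only in preprocess_pool_op_create()
moves into the shared enforce_pool_op_caps() helper, so every pool op is vetted
before any processing. For example (illustrative; the client name mirrors the
qa workunit added in this series), a client with only mon 'allow r' can no
longer create a pool snapshot:

    import rados

    # client.mon_read holds only: mon 'allow r'
    cluster = rados.Rados(conffile="", rados_id="mon_read")
    cluster.connect()
    cluster.open_ioctx("pool1").create_snap("snap1")   # now fails with rados.Error (EPERM)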
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
---
src/mon/OSDMonitor.cc | 20 +++++++-------------
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 2783356..80a3ff0 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -76,6 +76,9 @@
#include "include/str_map.h"
#include "include/scope_guard.h"
+#include "auth/cephx/CephxKeyServer.h"
+#include "osd/OSDCap.h"
+
#include "json_spirit/json_spirit_reader.h"
#include <boost/algorithm/string/predicate.hpp>
@@ -90,6 +93,87 @@ const uint32_t MAX_POOL_APPLICATIONS = 4;
const uint32_t MAX_POOL_APPLICATION_KEYS = 64;
const uint32_t MAX_POOL_APPLICATION_LENGTH = 128;
+bool is_osd_writable(const OSDCapGrant& grant, const std::string* pool_name) {
+ // Note: this doesn't include support for the application tag match
+ if ((grant.spec.allow & OSD_CAP_W) != 0) {
+ auto& match = grant.match;
+ if (match.is_match_all()) {
+ return true;
+ } else if (pool_name != nullptr && match.auid < 0 &&
+ !match.pool_namespace.pool_name.empty() &&
+ match.pool_namespace.pool_name == *pool_name) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool is_unmanaged_snap_op_permitted(CephContext* cct,
+ const KeyServer& key_server,
+ const EntityName& entity_name,
+ const MonCap& mon_caps,
+ const std::string* pool_name)
+{
+ typedef std::map<std::string, std::string> CommandArgs;
+
+ if (mon_caps.is_capable(cct, CEPH_ENTITY_TYPE_MON,
+ entity_name, "osd",
+ "osd pool op unmanaged-snap",
+ (pool_name == nullptr ?
+ CommandArgs{} /* pool DNE, require unrestricted cap */ :
+ CommandArgs{{"poolname", *pool_name}}),
+ false, true, false)) {
+ return true;
+ }
+
+ AuthCapsInfo caps_info;
+ if (!key_server.get_service_caps(entity_name, CEPH_ENTITY_TYPE_OSD,
+ caps_info)) {
+ dout(10) << "unable to locate OSD cap data for " << entity_name
+ << " in auth db" << dendl;
+ return false;
+ }
+
+ string caps_str;
+ if (caps_info.caps.length() > 0) {
+ auto p = caps_info.caps.cbegin();
+ try {
+ decode(caps_str, p);
+ } catch (const buffer::error &err) {
+ derr << "corrupt OSD cap data for " << entity_name << " in auth db"
+ << dendl;
+ return false;
+ }
+ }
+
+ OSDCap osd_cap;
+ if (!osd_cap.parse(caps_str, nullptr)) {
+ dout(10) << "unable to parse OSD cap data for " << entity_name
+ << " in auth db" << dendl;
+ return false;
+ }
+
+ // if the entity has write permissions in one or all pools, permit
+ // usage of unmanaged-snapshots
+ if (osd_cap.allow_all()) {
+ return true;
+ }
+
+ for (auto& grant : osd_cap.grants) {
+ if (grant.profile.is_valid()) {
+ for (auto& profile_grant : grant.profile_grants) {
+ if (is_osd_writable(profile_grant, pool_name)) {
+ return true;
+ }
+ }
+ } else if (is_osd_writable(grant, pool_name)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
} // anonymous namespace
void LastEpochClean::Lec::report(ps_t ps, epoch_t last_epoch_clean)
@@ -11424,11 +11508,61 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op,
return true;
}
-bool OSDMonitor::preprocess_pool_op(MonOpRequestRef op)
+bool OSDMonitor::enforce_pool_op_caps(MonOpRequestRef op)
{
op->mark_osdmon_event(__func__);
+
MPoolOp *m = static_cast<MPoolOp*>(op->get_req());
-
+ MonSession *session = m->get_session();
+ if (!session) {
+ _pool_op_reply(op, -EPERM, osdmap.get_epoch());
+ return true;
+ }
+
+ switch (m->op) {
+ case POOL_OP_CREATE_UNMANAGED_SNAP:
+ case POOL_OP_DELETE_UNMANAGED_SNAP:
+ {
+ const std::string* pool_name = nullptr;
+ const pg_pool_t *pg_pool = osdmap.get_pg_pool(m->pool);
+ if (pg_pool != nullptr) {
+ pool_name = &osdmap.get_pool_name(m->pool);
+ }
+
+ if (!is_unmanaged_snap_op_permitted(cct, mon->key_server,
+ session->entity_name, session->caps,
+ pool_name)) {
+ dout(0) << "got unmanaged-snap pool op from entity with insufficient "
+ << "privileges. message: " << *m << std::endl
+ << "caps: " << session->caps << dendl;
+ _pool_op_reply(op, -EPERM, osdmap.get_epoch());
+ return true;
+ }
+ }
+ break;
+ default:
+ if (!session->is_capable("osd", MON_CAP_W)) {
+ dout(0) << "got pool op from entity with insufficient privileges. "
+ << "message: " << *m << std::endl
+ << "caps: " << session->caps << dendl;
+ _pool_op_reply(op, -EPERM, osdmap.get_epoch());
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+bool OSDMonitor::preprocess_pool_op(MonOpRequestRef op)
+{
+ op->mark_osdmon_event(__func__);
+ MPoolOp *m = static_cast<MPoolOp*>(op->get_req());
+
+ if (enforce_pool_op_caps(op)) {
+ return true;
+ }
+
if (m->fsid != mon->monmap->fsid) {
dout(0) << __func__ << " drop message on fsid " << m->fsid
<< " != " << mon->monmap->fsid << " for " << *m << dendl;
@@ -11508,19 +11642,6 @@ bool OSDMonitor::preprocess_pool_op_create(MonOpRequestRef op)
{
op->mark_osdmon_event(__func__);
MPoolOp *m = static_cast<MPoolOp*>(op->get_req());
- MonSession *session = m->get_session();
- if (!session) {
- _pool_op_reply(op, -EPERM, osdmap.get_epoch());
- return true;
- }
- if (!session->is_capable("osd", MON_CAP_W)) {
- dout(5) << "attempt to create new pool without sufficient auid privileges!"
- << "message: " << *m << std::endl
- << "caps: " << session->caps << dendl;
- _pool_op_reply(op, -EPERM, osdmap.get_epoch());
- return true;
- }
-
int64_t pool = osdmap.lookup_pg_pool_name(m->name.c_str());
if (pool >= 0) {
_pool_op_reply(op, 0, osdmap.get_epoch());
diff --git a/src/mon/OSDMonitor.h b/src/mon/OSDMonitor.h
index b1c6fcd..4395412 100644
--- a/src/mon/OSDMonitor.h
+++ b/src/mon/OSDMonitor.h
@@ -300,6 +300,7 @@ private:
int _prepare_remove_pool(int64_t pool, ostream *ss, bool no_fake);
int _prepare_rename_pool(int64_t pool, string newname);
+ bool enforce_pool_op_caps(MonOpRequestRef op);
bool preprocess_pool_op (MonOpRequestRef op);
bool preprocess_pool_op_create (MonOpRequestRef op);
bool prepare_pool_op (MonOpRequestRef op);
From 21538304d78df9f3b9f54a5b3c9b6b62fc3e6b48 Mon Sep 17 00:00:00 2001
From: Jason Dillaman <dillaman@redhat.com>
Date: Tue, 5 Jun 2018 14:48:17 -0400
Subject: [PATCH 3/5] pybind/rados: new methods for manipulating self-managed
snapshots
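A typical round trip with the new methods (a sketch mirroring the unit test
below; it assumes an existing pool named "test_pool" and a client with full
caps):

    import rados

    cluster = rados.Rados(conffile="")
    cluster.connect()
    ioctx = cluster.open_ioctx("test_pool")

    ioctx.set_self_managed_snap_write([])        # enter self-managed snapshot mode
    ioctx.write("obj", b"v1")
    snap_id = ioctx.create_self_managed_snap()
    ioctx.set_self_managed_snap_write([snap_id])
    ioctx.write("obj", b"v2")

    ioctx.rollback_self_managed_snap("obj", snap_id)
    assert ioctx.read("obj") == b"v1"
    ioctx.remove_self_managed_snap(snap_id)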
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
---
src/pybind/rados/rados.pyx | 106 ++++++++++++++++++++++++++++++++++
src/test/pybind/test_rados.py | 37 ++++++++++++
2 files changed, 143 insertions(+)
diff --git a/src/pybind/rados/rados.pyx b/src/pybind/rados/rados.pyx
index 1131801..baa4af4 100644
--- a/src/pybind/rados/rados.pyx
+++ b/src/pybind/rados/rados.pyx
@@ -226,6 +226,17 @@ cdef extern from "rados/librados.h" nogil:
int rados_ioctx_snap_list(rados_ioctx_t io, rados_snap_t * snaps, int maxlen)
int rados_ioctx_snap_get_stamp(rados_ioctx_t io, rados_snap_t id, time_t * t)
+ int rados_ioctx_selfmanaged_snap_create(rados_ioctx_t io,
+ rados_snap_t *snapid)
+ int rados_ioctx_selfmanaged_snap_remove(rados_ioctx_t io,
+ rados_snap_t snapid)
+ int rados_ioctx_selfmanaged_snap_set_write_ctx(rados_ioctx_t io,
+ rados_snap_t snap_seq,
+ rados_snap_t *snap,
+ int num_snaps)
+ int rados_ioctx_selfmanaged_snap_rollback(rados_ioctx_t io, const char *oid,
+ rados_snap_t snapid)
+
int rados_lock_exclusive(rados_ioctx_t io, const char * oid, const char * name,
const char * cookie, const char * desc,
timeval * duration, uint8_t flags)
@@ -3115,6 +3126,101 @@ returned %d, but should return zero on success." % (self.name, ret))
if ret != 0:
raise make_ex(ret, "Failed to rollback %s" % oid)
+ def create_self_managed_snap(self):
+ """
+ Creates a self-managed snapshot
+
+ :returns: snap id on success
+
+ :raises: :class:`Error`
+ """
+ self.require_ioctx_open()
+ cdef:
+ rados_snap_t _snap_id
+ with nogil:
+ ret = rados_ioctx_selfmanaged_snap_create(self.io, &_snap_id)
+ if ret != 0:
+ raise make_ex(ret, "Failed to create self-managed snapshot")
+ return int(_snap_id)
+
+ @requires(('snap_id', int))
+ def remove_self_managed_snap(self, snap_id):
+ """
+ Removes a self-managed snapshot
+
+ :param snap_id: the id of the snapshot
+ :type snap_id: int
+
+ :raises: :class:`TypeError`
+ :raises: :class:`Error`
+ """
+ self.require_ioctx_open()
+ cdef:
+ rados_snap_t _snap_id = snap_id
+ with nogil:
+ ret = rados_ioctx_selfmanaged_snap_remove(self.io, _snap_id)
+ if ret != 0:
+ raise make_ex(ret, "Failed to remove self-managed snapshot")
+
+ def set_self_managed_snap_write(self, snaps):
+ """
+ Updates the write context to the specified self-managed
+ snapshot ids.
+
+ :param snaps: all associated self-managed snapshot ids
+ :type snaps: list
+
+ :raises: :class:`TypeError`
+ :raises: :class:`Error`
+ """
+ self.require_ioctx_open()
+ sorted_snaps = []
+ snap_seq = 0
+ if snaps:
+ sorted_snaps = sorted([int(x) for x in snaps], reverse=True)
+ snap_seq = sorted_snaps[0]
+
+ cdef:
+ rados_snap_t _snap_seq = snap_seq
+ rados_snap_t *_snaps = NULL
+ int _num_snaps = len(sorted_snaps)
+ try:
+ _snaps = <rados_snap_t *>malloc(_num_snaps * sizeof(rados_snap_t))
+ for i in range(len(sorted_snaps)):
+ _snaps[i] = sorted_snaps[i]
+ with nogil:
+ ret = rados_ioctx_selfmanaged_snap_set_write_ctx(self.io,
+ _snap_seq,
+ _snaps,
+ _num_snaps)
+ if ret != 0:
+ raise make_ex(ret, "Failed to update snapshot write context")
+ finally:
+ free(_snaps)
+
+ @requires(('oid', str_type), ('snap_id', int))
+ def rollback_self_managed_snap(self, oid, snap_id):
+ """
+ Rolls a specific object back to a self-managed snapshot revision
+
+ :param oid: the name of the object
+ :type oid: str
+ :param snap_id: the id of the snapshot
+ :type snap_id: int
+
+ :raises: :class:`TypeError`
+ :raises: :class:`Error`
+ """
+ self.require_ioctx_open()
+ oid = cstr(oid, 'oid')
+ cdef:
+ char *_oid = oid
+ rados_snap_t _snap_id = snap_id
+ with nogil:
+ ret = rados_ioctx_selfmanaged_snap_rollback(self.io, _oid, _snap_id)
+ if ret != 0:
+ raise make_ex(ret, "Failed to rollback %s" % oid)
+
def get_last_version(self):
"""
Return the version of the last object read or written to.
diff --git a/src/test/pybind/test_rados.py b/src/test/pybind/test_rados.py
index 88b8d2a..038a3c9 100644
--- a/src/test/pybind/test_rados.py
+++ b/src/test/pybind/test_rados.py
@@ -926,6 +926,43 @@ class TestObject(object):
eq(self.object.read(3), b'bar')
eq(self.object.read(3), b'baz')
+class TestIoCtxSelfManagedSnaps(object):
+ def setUp(self):
+ self.rados = Rados(conffile='')
+ self.rados.connect()
+ self.rados.create_pool('test_pool')
+ assert self.rados.pool_exists('test_pool')
+ self.ioctx = self.rados.open_ioctx('test_pool')
+
+ def tearDown(self):
+ cmd = {"prefix":"osd unset", "key":"noup"}
+ self.rados.mon_command(json.dumps(cmd), b'')
+ self.ioctx.close()
+ self.rados.delete_pool('test_pool')
+ self.rados.shutdown()
+
+ def test(self):
+ # cannot mix-and-match pool and self-managed snapshot mode
+ self.ioctx.set_self_managed_snap_write([])
+ self.ioctx.write('abc', b'abc')
+ snap_id_1 = self.ioctx.create_self_managed_snap()
+ self.ioctx.set_self_managed_snap_write([snap_id_1])
+
+ self.ioctx.write('abc', b'def')
+ snap_id_2 = self.ioctx.create_self_managed_snap()
+ self.ioctx.set_self_managed_snap_write([snap_id_1, snap_id_2])
+
+ self.ioctx.write('abc', b'ghi')
+
+ self.ioctx.rollback_self_managed_snap('abc', snap_id_1)
+ eq(self.ioctx.read('abc'), b'abc')
+
+ self.ioctx.rollback_self_managed_snap('abc', snap_id_2)
+ eq(self.ioctx.read('abc'), b'def')
+
+ self.ioctx.remove_self_managed_snap(snap_id_1)
+ self.ioctx.remove_self_managed_snap(snap_id_2)
+
class TestCommand(object):
def setUp(self):