
ceph: Fix for CVE-2018-10861

- Added the upstream patch from the link below:
https://github.com/ceph/ceph/commit/975528f632f73fbffa3f1fee304e3bbe3296cffc
- The patch did not apply cleanly, so it was rebased with adjusted hunk offsets (no code changes, only line numbers); a quick way to verify the rebase is sketched below.
- The build then failed with the patch applied, so a second patch was added to fix the build.
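
A minimal sketch of that verification, assuming the upstream ceph-12.2.4 tarball and the rebased CVE-2018-10861.patch sit in the current directory (paths are illustrative, not part of the build system):

# --dry-run only reports whether every hunk applies; no files are modified.
tar xf ceph-12.2.4.tar.gz
cd ceph-12.2.4
patch -p1 --dry-run < ../CVE-2018-10861.patch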

Change-Id: I59fdcc75e3a507aa268f6ce3974c6c729415ef4a
Reviewed-on: http://photon-jenkins.eng.vmware.com:8082/5880
Tested-by: gerrit-photon <photon-checkins@vmware.com>
Reviewed-by: Anish Swaminathan <anishs@vmware.com>

Ankit Jain authored on 2018/10/06 04:12:12
Showing 3 changed files
CVE-2018-10861.patch
new file mode 100644
... ...
@@ -0,0 +1,674 @@
0
+From 97e3f0aa013dad49de1a55f8025327de7e801822 Mon Sep 17 00:00:00 2001
1
+From: Jason Dillaman <dillaman@redhat.com>
2
+Date: Wed, 20 Jun 2018 22:20:14 -0400
3
+Subject: [PATCH 5/5] qa/workunits/rados: test pool op permissions
4
+
5
+Signed-off-by: Jason Dillaman <dillaman@redhat.com>
6
+---
7
+ qa/workunits/rados/test_pool_access.sh | 99 ++++++++++++++++++++++++--
8
+ 1 file changed, 92 insertions(+), 7 deletions(-)
9
+
10
+diff --git a/qa/workunits/rados/test_pool_access.sh b/qa/workunits/rados/test_pool_access.sh
11
+index 8597b71..947b57e 100755
12
+--- a/qa/workunits/rados/test_pool_access.sh
13
+@@ -2,22 +2,107 @@
14
+ 
15
+ set -e
16
+ 
17
+-expect_1()
18
++KEYRING=$(mktemp)
19
++trap cleanup EXIT ERR HUP INT QUIT
20
++
21
++cleanup() {
22
++    (ceph auth del client.mon_read || true) >/dev/null 2>&1
23
++    (ceph auth del client.mon_write || true) >/dev/null 2>&1
24
++
25
++    rm -f $KEYRING
26
++}
27
++
28
++expect_false()
29
+ {
30
+-  set -x
31
+-  set +e
32
+-  "$@"
33
+-  if [ $? == 1 ]; then return 0; else return 1; fi
34
++	set -x
35
++	if "$@"; then return 1; else return 0; fi
36
++}
37
++
38
++create_pool_op() {
39
++  ID=$1
40
++  POOL=$2
41
++
42
++  cat << EOF | CEPH_ARGS="-k $KEYRING" python
43
++import rados
44
++
45
++cluster = rados.Rados(conffile="", rados_id="${ID}")
46
++cluster.connect()
47
++cluster.create_pool("${POOL}")
48
++EOF
49
+ }
50
+ 
51
++delete_pool_op() {
52
++  ID=$1
53
++  POOL=$2
54
++
55
++  cat << EOF | CEPH_ARGS="-k $KEYRING" python
56
++import rados
57
++
58
++cluster = rados.Rados(conffile="", rados_id="${ID}")
59
++cluster.connect()
60
++cluster.delete_pool("${POOL}")
61
++EOF
62
++}
63
++
64
++create_pool_snap_op() {
65
++  ID=$1
66
++  POOL=$2
67
++  SNAP=$3
68
++
69
++  cat << EOF | CEPH_ARGS="-k $KEYRING" python
70
++import rados
71
++
72
++cluster = rados.Rados(conffile="", rados_id="${ID}")
73
++cluster.connect()
74
++ioctx = cluster.open_ioctx("${POOL}")
75
++
76
++ioctx.create_snap("${SNAP}")
77
++EOF
78
++}
79
++
80
++remove_pool_snap_op() {
81
++  ID=$1
82
++  POOL=$2
83
++  SNAP=$3
84
++
85
++  cat << EOF | CEPH_ARGS="-k $KEYRING" python
86
++import rados
87
++
88
++cluster = rados.Rados(conffile="", rados_id="${ID}")
89
++cluster.connect()
90
++ioctx = cluster.open_ioctx("${POOL}")
91
++
92
++ioctx.remove_snap("${SNAP}")
93
++EOF
94
++}
95
++
96
++test_pool_op()
97
++{
98
++    ceph auth get-or-create client.mon_read mon 'allow r' >> $KEYRING
99
++    ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
100
++
101
++    expect_false create_pool_op mon_read pool1
102
++    create_pool_op mon_write pool1
103
++
104
++    expect_false create_pool_snap_op mon_read pool1 snap1
105
++    create_pool_snap_op mon_write pool1 snap1
106
++
107
++    expect_false remove_pool_snap_op mon_read pool1 snap1
108
++    remove_pool_snap_op mon_write pool1 snap1
109
++
110
++    expect_false delete_pool_op mon_read pool1
111
++    delete_pool_op mon_write pool1
112
++}
113
+ 
114
+ key=`ceph auth get-or-create-key client.poolaccess1 mon 'allow r' osd 'allow *'`
115
+ rados --id poolaccess1 --key $key -p rbd ls
116
+ 
117
+ key=`ceph auth get-or-create-key client.poolaccess2 mon 'allow r' osd 'allow * pool=nopool'`
118
+-expect_1 rados --id poolaccess2 --key $key -p rbd ls
119
++expect_false rados --id poolaccess2 --key $key -p rbd ls
120
+ 
121
+ key=`ceph auth get-or-create-key client.poolaccess3 mon 'allow r' osd 'allow rw pool=nopool'`
122
+-expect_1 rados --id poolaccess3 --key $key -p rbd ls
123
++expect_false rados --id poolaccess3 --key $key -p rbd ls
124
++
125
++test_pool_op
126
+ 
127
+ echo OK
128
+
129
+From b12dd0bf419ae834abb31c712830fa9c4b5cda9c Mon Sep 17 00:00:00 2001
130
+From: Jason Dillaman <dillaman@redhat.com>
131
+Date: Tue, 5 Jun 2018 15:40:44 -0400
132
+Subject: [PATCH 4/5] qa/workunits/rbd: test self-managed snapshot
133
+ create/remove permissions
134
+
135
+Signed-off-by: Jason Dillaman <dillaman@redhat.com>
136
+---
137
+ qa/workunits/rbd/permissions.sh | 92 +++++++++++++++++++++++++++++++++
138
+ 1 file changed, 92 insertions(+)
139
+
140
+diff --git a/qa/workunits/rbd/permissions.sh b/qa/workunits/rbd/permissions.sh
141
+index a435a67..1a4295b 100755
142
+--- a/qa/workunits/rbd/permissions.sh
143
+@@ -23,11 +23,27 @@ recreate_pools() {
144
+ delete_users() {
145
+     (ceph auth del client.volumes || true) >/dev/null 2>&1
146
+     (ceph auth del client.images || true) >/dev/null 2>&1
147
++
148
++    (ceph auth del client.snap_none || true) >/dev/null 2>&1
149
++    (ceph auth del client.snap_all || true) >/dev/null 2>&1
150
++    (ceph auth del client.snap_pool || true) >/dev/null 2>&1
151
++    (ceph auth del client.snap_profile_all || true) >/dev/null 2>&1
152
++    (ceph auth del client.snap_profile_pool || true) >/dev/null 2>&1
153
++
154
++    (ceph auth del client.mon_write || true) >/dev/null 2>&1
155
+ }
156
+ 
157
+ create_users() {
158
+     ceph auth get-or-create client.volumes mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow r class-read pool images, allow rwx pool volumes' >> $KEYRING
159
+     ceph auth get-or-create client.images mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool images' >> $KEYRING
160
++
161
++    ceph auth get-or-create client.snap_none mon 'allow r' >> $KEYRING
162
++    ceph auth get-or-create client.snap_all mon 'allow r' osd 'allow w' >> $KEYRING
163
++    ceph auth get-or-create client.snap_pool mon 'allow r' osd 'allow w pool=images' >> $KEYRING
164
++    ceph auth get-or-create client.snap_profile_all mon 'allow r' osd 'profile rbd' >> $KEYRING
165
++    ceph auth get-or-create client.snap_profile_pool mon 'allow r' osd 'profile rbd pool=images' >> $KEYRING
166
++
167
++    ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
168
+ }
169
+ 
170
+ expect() {
171
+@@ -126,9 +142,83 @@ test_volumes_access() {
172
+     rbd -k $KEYRING --id volumes rm volumes/child
173
+ }
174
+ 
175
++create_self_managed_snapshot() {
176
++  ID=$1
177
++  POOL=$2
178
++
179
++  cat << EOF | CEPH_ARGS="-k $KEYRING" python
180
++import rados
181
++
182
++cluster = rados.Rados(conffile="", rados_id="${ID}")
183
++cluster.connect()
184
++ioctx = cluster.open_ioctx("${POOL}")
185
++
186
++snap_id = ioctx.create_self_managed_snap()
187
++print ("Created snap id {}".format(snap_id))
188
++EOF
189
++}
190
++
191
++remove_self_managed_snapshot() {
192
++  ID=$1
193
++  POOL=$2
194
++
195
++  cat << EOF | CEPH_ARGS="-k $KEYRING" python
196
++import rados
197
++
198
++cluster1 = rados.Rados(conffile="", rados_id="mon_write")
199
++cluster1.connect()
200
++ioctx1 = cluster1.open_ioctx("${POOL}")
201
++
202
++snap_id = ioctx1.create_self_managed_snap()
203
++print ("Created snap id {}".format(snap_id))
204
++
205
++cluster2 = rados.Rados(conffile="", rados_id="${ID}")
206
++cluster2.connect()
207
++ioctx2 = cluster2.open_ioctx("${POOL}")
208
++
209
++ioctx2.remove_self_managed_snap(snap_id)
210
++print ("Removed snap id {}".format(snap_id))
211
++EOF
212
++}
213
++
214
++test_remove_self_managed_snapshots() {
215
++    # Ensure users cannot create self-managed snapshots w/o permissions
216
++    expect 1 create_self_managed_snapshot snap_none images
217
++    expect 1 create_self_managed_snapshot snap_none volumes
218
++
219
++    create_self_managed_snapshot snap_all images
220
++    create_self_managed_snapshot snap_all volumes
221
++
222
++    create_self_managed_snapshot snap_pool images
223
++    expect 1 create_self_managed_snapshot snap_pool volumes
224
++
225
++    create_self_managed_snapshot snap_profile_all images
226
++    create_self_managed_snapshot snap_profile_all volumes
227
++
228
++    create_self_managed_snapshot snap_profile_pool images
229
++    expect 1 create_self_managed_snapshot snap_profile_pool volumes
230
++
231
++    # Ensure users cannot delete self-managed snapshots w/o permissions
232
++    expect 1 remove_self_managed_snapshot snap_none images
233
++    expect 1 remove_self_managed_snapshot snap_none volumes
234
++
235
++    remove_self_managed_snapshot snap_all images
236
++    remove_self_managed_snapshot snap_all volumes
237
++
238
++    remove_self_managed_snapshot snap_pool images
239
++    expect 1 remove_self_managed_snapshot snap_pool volumes
240
++
241
++    remove_self_managed_snapshot snap_profile_all images
242
++    remove_self_managed_snapshot snap_profile_all volumes
243
++
244
++    remove_self_managed_snapshot snap_profile_pool images
245
++    expect 1 remove_self_managed_snapshot snap_profile_pool volumes
246
++}
247
++
248
+ cleanup() {
249
+     rm -f $KEYRING
250
+ }
251
++
252
+ KEYRING=$(mktemp)
253
+ trap cleanup EXIT ERR HUP INT QUIT
254
+ 
255
+@@ -141,6 +231,8 @@ test_images_access
256
+ recreate_pools
257
+ test_volumes_access
258
+ 
259
++test_remove_self_managed_snapshots
260
++
261
+ delete_pools
262
+ delete_users
263
+ 
264
+From 4972e054b32c8200600f27564d50d443e683153e Mon Sep 17 00:00:00 2001
265
+From: Jason Dillaman <dillaman@redhat.com>
266
+Date: Tue, 5 Jun 2018 13:24:48 -0400
267
+Subject: [PATCH 1/5] mon/OSDMonitor: enforce caps when creating/deleting
268
+ unmanaged snapshots
269
+
270
+The entity will require write access to the OSD service or permission
271
+for the synthetic "osd pool op unmanaged-snap" command.
272
+
273
+Signed-off-by: Jason Dillaman <dillaman@redhat.com>
274
+---
275
+ src/mon/OSDMonitor.cc | 131 +++++++++++++++++++++++++++++++++++++++++-
276
+ src/mon/OSDMonitor.h  |   1 +
277
+ 2 files changed, 130 insertions(+), 2 deletions(-)
278
+
279
+From 61c06200fe59996bba2bb65fc402207bc10fd459 Mon Sep 17 00:00:00 2001
280
+From: Jason Dillaman <dillaman@redhat.com>
281
+Date: Wed, 20 Jun 2018 21:30:47 -0400
282
+Subject: [PATCH 2/5] mon/OSDMonitor: enforce caps for all remaining pool ops
283
+
284
+Signed-off-by: Jason Dillaman <dillaman@redhat.com>
285
+---
286
+ src/mon/OSDMonitor.cc | 20 +++++++-------------
287
+ 1 file changed, 7 insertions(+), 13 deletions(-)
288
+
289
+diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
290
+index 2783356..80a3ff0 100644
291
+--- a/src/mon/OSDMonitor.cc
292
+@@ -76,6 +76,9 @@
293
+ #include "include/str_map.h"
294
+ #include "include/scope_guard.h"
295
+ 
296
++#include "auth/cephx/CephxKeyServer.h"
297
++#include "osd/OSDCap.h"
298
++
299
+ #include "json_spirit/json_spirit_reader.h"
300
+ 
301
+ #include <boost/algorithm/string/predicate.hpp>
302
+@@ -90,6 +93,87 @@ const uint32_t MAX_POOL_APPLICATIONS = 4;
303
+ const uint32_t MAX_POOL_APPLICATION_KEYS = 64;
304
+ const uint32_t MAX_POOL_APPLICATION_LENGTH = 128;
305
+ 
306
++bool is_osd_writable(const OSDCapGrant& grant, const std::string* pool_name) {
307
++  // Note: this doesn't include support for the application tag match
308
++  if ((grant.spec.allow & OSD_CAP_W) != 0) {
309
++    auto& match = grant.match;
310
++    if (match.is_match_all()) {
311
++      return true;
312
++    } else if (pool_name != nullptr && match.auid < 0 &&
313
++               !match.pool_namespace.pool_name.empty() &&
314
++               match.pool_namespace.pool_name == *pool_name) {
315
++      return true;
316
++    }
317
++  }
318
++  return false;
319
++}
320
++
321
++bool is_unmanaged_snap_op_permitted(CephContext* cct,
322
++                                    const KeyServer& key_server,
323
++                                    const EntityName& entity_name,
324
++                                    const MonCap& mon_caps,
325
++                                    const std::string* pool_name)
326
++{
327
++  typedef std::map<std::string, std::string> CommandArgs;
328
++
329
++  if (mon_caps.is_capable(cct, CEPH_ENTITY_TYPE_MON,
330
++                               entity_name, "osd",
331
++                               "osd pool op unmanaged-snap",
332
++                               (pool_name == nullptr ?
333
++                                  CommandArgs{} /* pool DNE, require unrestricted cap */ :
334
++                                  CommandArgs{{"poolname", *pool_name}}),
335
++                                false, true, false)) {
336
++    return true;
337
++  }
338
++
339
++  AuthCapsInfo caps_info;
340
++  if (!key_server.get_service_caps(entity_name, CEPH_ENTITY_TYPE_OSD,
341
++                                   caps_info)) {
342
++    dout(10) << "unable to locate OSD cap data for " << entity_name
343
++             << " in auth db" << dendl;
344
++    return false;
345
++  }
346
++
347
++  string caps_str;
348
++  if (caps_info.caps.length() > 0) {
349
++    auto p = caps_info.caps.cbegin();
350
++    try {
351
++      decode(caps_str, p);
352
++    } catch (const buffer::error &err) {
353
++      derr << "corrupt OSD cap data for " << entity_name << " in auth db"
354
++           << dendl;
355
++      return false;
356
++    }
357
++  }
358
++
359
++  OSDCap osd_cap;
360
++  if (!osd_cap.parse(caps_str, nullptr)) {
361
++    dout(10) << "unable to parse OSD cap data for " << entity_name
362
++             << " in auth db" << dendl;
363
++    return false;
364
++  }
365
++
366
++  // if the entity has write permissions in one or all pools, permit
367
++  // usage of unmanaged-snapshots
368
++  if (osd_cap.allow_all()) {
369
++    return true;
370
++  }
371
++
372
++  for (auto& grant : osd_cap.grants) {
373
++    if (grant.profile.is_valid()) {
374
++      for (auto& profile_grant : grant.profile_grants) {
375
++        if (is_osd_writable(profile_grant, pool_name)) {
376
++          return true;
377
++        }
378
++      }
379
++    } else if (is_osd_writable(grant, pool_name)) {
380
++      return true;
381
++    }
382
++  }
383
++
384
++  return false;
385
++}
386
++
387
+ } // anonymous namespace
388
+ 
389
+ void LastEpochClean::Lec::report(ps_t ps, epoch_t last_epoch_clean)
390
+@@ -11424,11 +11508,61 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op,
391
+   return true;
392
+ }
393
+ 
394
+-bool OSDMonitor::preprocess_pool_op(MonOpRequestRef op) 
395
++bool OSDMonitor::enforce_pool_op_caps(MonOpRequestRef op)
396
+ {
397
+   op->mark_osdmon_event(__func__);
398
++
399
+   MPoolOp *m = static_cast<MPoolOp*>(op->get_req());
400
+-  
401
++  MonSession *session = m->get_session();
402
++  if (!session) {
403
++    _pool_op_reply(op, -EPERM, osdmap.get_epoch());
404
++    return true;
405
++  }
406
++
407
++  switch (m->op) {
408
++  case POOL_OP_CREATE_UNMANAGED_SNAP:
409
++  case POOL_OP_DELETE_UNMANAGED_SNAP:
410
++    {
411
++      const std::string* pool_name = nullptr;
412
++      const pg_pool_t *pg_pool = osdmap.get_pg_pool(m->pool);
413
++      if (pg_pool != nullptr) {
414
++        pool_name = &osdmap.get_pool_name(m->pool);
415
++      }
416
++
417
++      if (!is_unmanaged_snap_op_permitted(cct, mon->key_server,
418
++                                          session->entity_name, session->caps,
419
++                                          pool_name)) {
420
++        dout(0) << "got unmanaged-snap pool op from entity with insufficient "
421
++                << "privileges. message: " << *m  << std::endl
422
++                << "caps: " << session->caps << dendl;
423
++        _pool_op_reply(op, -EPERM, osdmap.get_epoch());
424
++        return true;
425
++      }
426
++    }
427
++    break;
428
++  default:
429
++    if (!session->is_capable("osd", MON_CAP_W)) {
430
++      dout(0) << "got pool op from entity with insufficient privileges. "
431
++              << "message: " << *m  << std::endl
432
++              << "caps: " << session->caps << dendl;
433
++      _pool_op_reply(op, -EPERM, osdmap.get_epoch());
434
++      return true;
435
++    }
436
++    break;
437
++  }
438
++
439
++  return false;
440
++}
441
++
442
++bool OSDMonitor::preprocess_pool_op(MonOpRequestRef op)
443
++{
444
++  op->mark_osdmon_event(__func__);
445
++  MPoolOp *m = static_cast<MPoolOp*>(op->get_req());
446
++
447
++  if (enforce_pool_op_caps(op)) {
448
++    return true;
449
++  }
450
++
451
+   if (m->fsid != mon->monmap->fsid) {
452
+     dout(0) << __func__ << " drop message on fsid " << m->fsid
453
+             << " != " << mon->monmap->fsid << " for " << *m << dendl;
454
+@@ -11508,19 +11642,6 @@ bool OSDMonitor::preprocess_pool_op_create(MonOpRequestRef op)
455
+ {
456
+   op->mark_osdmon_event(__func__);
457
+   MPoolOp *m = static_cast<MPoolOp*>(op->get_req());
458
+-  MonSession *session = m->get_session();
459
+-  if (!session) {
460
+-    _pool_op_reply(op, -EPERM, osdmap.get_epoch());
461
+-    return true;
462
+-  }
463
+-  if (!session->is_capable("osd", MON_CAP_W)) {
464
+-    dout(5) << "attempt to create new pool without sufficient auid privileges!"
465
+-	    << "message: " << *m  << std::endl
466
+-	    << "caps: " << session->caps << dendl;
467
+-    _pool_op_reply(op, -EPERM, osdmap.get_epoch());
468
+-    return true;
469
+-  }
470
+-
471
+   int64_t pool = osdmap.lookup_pg_pool_name(m->name.c_str());
472
+   if (pool >= 0) {
473
+     _pool_op_reply(op, 0, osdmap.get_epoch());
474
+diff --git a/src/mon/OSDMonitor.h b/src/mon/OSDMonitor.h
475
+index b1c6fcd..4395412 100644
476
+--- a/src/mon/OSDMonitor.h
477
+@@ -300,6 +300,7 @@ private:
478
+   int _prepare_remove_pool(int64_t pool, ostream *ss, bool no_fake);
479
+   int _prepare_rename_pool(int64_t pool, string newname);
480
+ 
481
++  bool enforce_pool_op_caps(MonOpRequestRef op);
482
+   bool preprocess_pool_op (MonOpRequestRef op);
483
+   bool preprocess_pool_op_create (MonOpRequestRef op);
484
+   bool prepare_pool_op (MonOpRequestRef op);
485
+
486
+From 21538304d78df9f3b9f54a5b3c9b6b62fc3e6b48 Mon Sep 17 00:00:00 2001
487
+From: Jason Dillaman <dillaman@redhat.com>
488
+Date: Tue, 5 Jun 2018 14:48:17 -0400
489
+Subject: [PATCH 3/5] pybind/rados: new methods for manipulating self-managed
490
+ snapshots
491
+
492
+Signed-off-by: Jason Dillaman <dillaman@redhat.com>
493
+---
494
+ src/pybind/rados/rados.pyx    | 106 ++++++++++++++++++++++++++++++++++
495
+ src/test/pybind/test_rados.py |  37 ++++++++++++
496
+ 2 files changed, 143 insertions(+)
497
+
498
+diff --git a/src/pybind/rados/rados.pyx b/src/pybind/rados/rados.pyx
499
+index 1131801..baa4af4 100644
500
+--- a/src/pybind/rados/rados.pyx
501
+@@ -226,6 +226,17 @@ cdef extern from "rados/librados.h" nogil:
502
+     int rados_ioctx_snap_list(rados_ioctx_t io, rados_snap_t * snaps, int maxlen)
503
+     int rados_ioctx_snap_get_stamp(rados_ioctx_t io, rados_snap_t id, time_t * t)
504
+ 
505
++    int rados_ioctx_selfmanaged_snap_create(rados_ioctx_t io,
506
++                                            rados_snap_t *snapid)
507
++    int rados_ioctx_selfmanaged_snap_remove(rados_ioctx_t io,
508
++                                            rados_snap_t snapid)
509
++    int rados_ioctx_selfmanaged_snap_set_write_ctx(rados_ioctx_t io,
510
++                                                   rados_snap_t snap_seq,
511
++                                                   rados_snap_t *snap,
512
++                                                   int num_snaps)
513
++    int rados_ioctx_selfmanaged_snap_rollback(rados_ioctx_t io, const char *oid,
514
++                                              rados_snap_t snapid)
515
++
516
+     int rados_lock_exclusive(rados_ioctx_t io, const char * oid, const char * name,
517
+                              const char * cookie, const char * desc,
518
+                              timeval * duration, uint8_t flags)
519
+@@ -3115,6 +3126,101 @@ returned %d, but should return zero on success." % (self.name, ret))
520
+         if ret != 0:
521
+             raise make_ex(ret, "Failed to rollback %s" % oid)
522
+ 
523
++    def create_self_managed_snap(self):
524
++        """
525
++        Creates a self-managed snapshot
526
++
527
++        :returns: snap id on success
528
++
529
++        :raises: :class:`Error`
530
++        """
531
++        self.require_ioctx_open()
532
++        cdef:
533
++            rados_snap_t _snap_id
534
++        with nogil:
535
++            ret = rados_ioctx_selfmanaged_snap_create(self.io, &_snap_id)
536
++        if ret != 0:
537
++            raise make_ex(ret, "Failed to create self-managed snapshot")
538
++        return int(_snap_id)
539
++
540
++    @requires(('snap_id', int))
541
++    def remove_self_managed_snap(self, snap_id):
542
++        """
543
++        Removes a self-managed snapshot
544
++
545
++        :param snap_id: the name of the snapshot
546
++        :type snap_id: int
547
++
548
++        :raises: :class:`TypeError`
549
++        :raises: :class:`Error`
550
++        """
551
++        self.require_ioctx_open()
552
++        cdef:
553
++            rados_snap_t _snap_id = snap_id
554
++        with nogil:
555
++            ret = rados_ioctx_selfmanaged_snap_remove(self.io, _snap_id)
556
++        if ret != 0:
557
++            raise make_ex(ret, "Failed to remove self-managed snapshot")
558
++
559
++    def set_self_managed_snap_write(self, snaps):
560
++        """
561
++        Updates the write context to the specified self-managed
562
++        snapshot ids.
563
++
564
++        :param snaps: all associated self-managed snapshot ids
565
++        :type snaps: list
566
++
567
++        :raises: :class:`TypeError`
568
++        :raises: :class:`Error`
569
++        """
570
++        self.require_ioctx_open()
571
++        sorted_snaps = []
572
++        snap_seq = 0
573
++        if snaps:
574
++            sorted_snaps = sorted([int(x) for x in snaps], reverse=True)
575
++            snap_seq = sorted_snaps[0]
576
++
577
++        cdef:
578
++            rados_snap_t _snap_seq = snap_seq
579
++            rados_snap_t *_snaps = NULL
580
++            int _num_snaps = len(sorted_snaps)
581
++        try:
582
++            _snaps = <rados_snap_t *>malloc(_num_snaps * sizeof(rados_snap_t))
583
++            for i in range(len(sorted_snaps)):
584
++                _snaps[i] = sorted_snaps[i]
585
++            with nogil:
586
++                ret = rados_ioctx_selfmanaged_snap_set_write_ctx(self.io,
587
++                                                                 _snap_seq,
588
++                                                                 _snaps,
589
++                                                                 _num_snaps)
590
++            if ret != 0:
591
++                raise make_ex(ret, "Failed to update snapshot write context")
592
++        finally:
593
++            free(_snaps)
594
++
595
++    @requires(('oid', str_type), ('snap_id', int))
596
++    def rollback_self_managed_snap(self, oid, snap_id):
597
++        """
598
++        Rolls an specific object back to a self-managed snapshot revision
599
++
600
++        :param oid: the name of the object
601
++        :type oid: str
602
++        :param snap_id: the name of the snapshot
603
++        :type snap_id: int
604
++
605
++        :raises: :class:`TypeError`
606
++        :raises: :class:`Error`
607
++        """
608
++        self.require_ioctx_open()
609
++        oid = cstr(oid, 'oid')
610
++        cdef:
611
++            char *_oid = oid
612
++            rados_snap_t _snap_id = snap_id
613
++        with nogil:
614
++            ret = rados_ioctx_selfmanaged_snap_rollback(self.io, _oid, _snap_id)
615
++        if ret != 0:
616
++            raise make_ex(ret, "Failed to rollback %s" % oid)
617
++
618
+     def get_last_version(self):
619
+         """
620
+         Return the version of the last object read or written to.
621
+diff --git a/src/test/pybind/test_rados.py b/src/test/pybind/test_rados.py
622
+index 88b8d2a..038a3c9 100644
623
+--- a/src/test/pybind/test_rados.py
624
+@@ -926,6 +926,43 @@ class TestObject(object):
625
+         eq(self.object.read(3), b'bar')
626
+         eq(self.object.read(3), b'baz')
627
+ 
628
++class TestIoCtxSelfManagedSnaps(object):
629
++    def setUp(self):
630
++        self.rados = Rados(conffile='')
631
++        self.rados.connect()
632
++        self.rados.create_pool('test_pool')
633
++        assert self.rados.pool_exists('test_pool')
634
++        self.ioctx = self.rados.open_ioctx('test_pool')
635
++
636
++    def tearDown(self):
637
++        cmd = {"prefix":"osd unset", "key":"noup"}
638
++        self.rados.mon_command(json.dumps(cmd), b'')
639
++        self.ioctx.close()
640
++        self.rados.delete_pool('test_pool')
641
++        self.rados.shutdown()
642
++
643
++    def test(self):
644
++        # cannot mix-and-match pool and self-managed snapshot mode
645
++        self.ioctx.set_self_managed_snap_write([])
646
++        self.ioctx.write('abc', b'abc')
647
++        snap_id_1 = self.ioctx.create_self_managed_snap()
648
++        self.ioctx.set_self_managed_snap_write([snap_id_1])
649
++
650
++        self.ioctx.write('abc', b'def')
651
++        snap_id_2 = self.ioctx.create_self_managed_snap()
652
++        self.ioctx.set_self_managed_snap_write([snap_id_1, snap_id_2])
653
++
654
++        self.ioctx.write('abc', b'ghi')
655
++
656
++        self.ioctx.rollback_self_managed_snap('abc', snap_id_1)
657
++        eq(self.ioctx.read('abc'), b'abc')
658
++
659
++        self.ioctx.rollback_self_managed_snap('abc', snap_id_2)
660
++        eq(self.ioctx.read('abc'), b'def')
661
++
662
++        self.ioctx.remove_self_managed_snap(snap_id_1)
663
++        self.ioctx.remove_self_managed_snap(snap_id_2)
664
++
665
+ class TestCommand(object):
666
+ 
667
+     def setUp(self):
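
The workunit and pybind changes above capture the behavioural change: creating or removing unmanaged (self-managed) snapshots now requires OSD write caps on the target pool (or an unrestricted grant), not just monitor read access. A minimal standalone check in the same style as the workunits, assuming a running test cluster with an existing "images" pool (the client names are illustrative):

set -e
KEYRING=$(mktemp)
ceph auth get-or-create client.snap_none mon 'allow r' >> $KEYRING
ceph auth get-or-create client.snap_pool mon 'allow r' osd 'allow w pool=images' >> $KEYRING

snap_cycle() {
  ID=$1
  # Create and immediately remove a self-managed snapshot as the given client,
  # using the pybind methods added by PATCH 3/5 above.
  CEPH_ARGS="-k $KEYRING" python << EOF
import rados
cluster = rados.Rados(conffile="", rados_id="${ID}")
cluster.connect()
ioctx = cluster.open_ioctx("images")
snap_id = ioctx.create_self_managed_snap()
ioctx.remove_self_managed_snap(snap_id)
EOF
}

# mon-read-only client must be rejected; pool write cap must be sufficient
if snap_cycle snap_none; then echo "FAIL: mon-read-only client succeeded"; exit 1; fi
snap_cycle snap_pool

(ceph auth del client.snap_none || true) >/dev/null 2>&1
(ceph auth del client.snap_pool || true) >/dev/null 2>&1
rm -f $KEYRING
echo OK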
build_fix_CVE-2018-10861.patch
new file mode 100644
... ...
@@ -0,0 +1,25 @@
0
+diff --git a/src/mon/CMakeLists.txt b/src/mon/CMakeLists.txt
1
+index 0404454..18d5ee1 100644
2
+--- a/src/mon/CMakeLists.txt
3
+@@ -5,6 +5,7 @@ set(lib_mon_srcs
4
+   ${osd_mon_files}
5
+   Paxos.cc
6
+   PaxosService.cc
7
++  ../osd/OSDCap.cc
8
+   OSDMonitor.cc
9
+   MDSMonitor.cc
10
+   FSCommands.cc
11
+diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
12
+index 80a3ff0..8423dd5 100644
13
+--- a/src/mon/OSDMonitor.cc
14
+@@ -136,7 +136,7 @@ bool is_unmanaged_snap_op_permitted(CephContext* cct,
15
+ 
16
+   string caps_str;
17
+   if (caps_info.caps.length() > 0) {
18
+-    auto p = caps_info.caps.cbegin();
19
++    auto p = caps_info.caps.begin();
20
+     try {
21
+       decode(caps_str, p);
22
+     } catch (const buffer::error &err) {
ceph.spec
... ...
@@ -14,7 +14,7 @@
14 14
 #################################################################################
15 15
 Name:       ceph
16 16
 Version:    12.2.4
17
-Release:    1%{?dist}
17
+Release:    2%{?dist}
18 18
 Epoch:      1
19 19
 Summary:    User space components of the Ceph file system
20 20
 License:    LGPL-2.1 and CC-BY-SA-1.0 and GPL-2.0 and BSL-1.0 and GPL-2.0-with-autoconf-exception and BSD-3-Clause and MIT
... ...
@@ -24,6 +24,8 @@ Source0:    http://ceph.com/download/%{name}-%{version}.tar.gz
24 24
 %define sha1 ceph=df93bc3fac55249f5f0d30caa567962b387693dd
25 25
 Vendor:     VMware, Inc.
26 26
 Distribution:   Photon
27
+Patch0:     CVE-2018-10861.patch
28
+Patch1:     build_fix_CVE-2018-10861.patch
27 29
 #################################################################################
28 30
 # dependencies that apply across all distro families
29 31
 #################################################################################
... ...
@@ -458,6 +460,8 @@ python-rbd, python-rgw or python-cephfs instead.
458 458
 #################################################################################
459 459
 %prep
460 460
 %setup -n ceph-%{version}
461
+%patch0 -p1
462
+%patch1 -p1
461 463
 
462 464
 %build
463 465
 %if %{with lowmem_builder}
... ...
@@ -1022,6 +1026,8 @@ ln -sf %{_libdir}/librbd.so.1 /usr/lib64/qemu/librbd.so.1
1022 1022
 # actually build this meta package.
1023 1023
 
1024 1024
 %changelog
1025
+*   Mon Oct 08 2018 Ankit Jain <ankitja@vmware.com> 12.2.4-2
1026
+-   fix CVE-2018-10861
1025 1027
 *   Thu Apr 19 2018 Xiaolin Li <xiaolinl@vmware.com> 12.2.4-1
1026 1028
 -   Updated to version 12.2.4, fix CVE-2018-7262
1027 1029
 *   Mon Sep 18 2017 Alexey Makhalov <amakhalov@vmware.com> 11.2.0-10
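
The spec changes above register the two patches (Patch0/Patch1), apply them in %prep, and bump the release to 12.2.4-2 with a matching changelog entry. A minimal sketch of reproducing the patched build outside the Photon CI, assuming a plain rpmbuild setup (paths are illustrative):

# %prep runs %patch0/%patch1 -p1, so a failure there means the rebase is off.
mkdir -p ~/rpmbuild/SOURCES
cp ceph-12.2.4.tar.gz CVE-2018-10861.patch build_fix_CVE-2018-10861.patch ~/rpmbuild/SOURCES/
rpmbuild -ba ceph.spec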