forked from cculianu/Fulcrum
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathStorage.cpp
2227 lines (2004 loc) · 108 KB
/
Storage.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
//
// Fulcrum - A fast & nimble SPV Server for Bitcoin Cash
// Copyright (C) 2019-2020 Calin A. Culianu <calin.culianu@gmail.com>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program (see LICENSE.txt). If not, see
// <https://www.gnu.org/licenses/>.
//
#include "BTC.h"
#include "CostCache.h"
#include "Mempool.h"
#include "Merkle.h"
#include "RecordFile.h"
#include "Storage.h"
#include "SubsMgr.h"
#include "rocksdb/db.h"
#include "rocksdb/iterator.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include <QByteArray>
#include <QDir>
#include <QVector> // we use this for the Height2Hash cache to save on memcopies since it's implicitly shared.
#include <atomic>
#include <cstring> // for memcpy
#include <list>
#include <optional>
#include <shared_mutex>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
// Out-of-line destructor definitions for the exception hierarchy. Anchoring each
// destructor in this translation unit pins the class's vtable here, suppressing
// -Wweak-vtables warnings (per the original intent noted in each class).
DatabaseError::~DatabaseError() = default;
DatabaseSerializationError::~DatabaseSerializationError() = default;
DatabaseFormatError::~DatabaseFormatError() = default;
DatabaseKeyNotFound::~DatabaseKeyNotFound() = default;
HeaderVerificationFailure::~HeaderVerificationFailure() = default;
UndoInfoMissing::~UndoInfoMissing() = default;
HistoryTooLarge::~HistoryTooLarge() = default;
namespace {
/// Encapsulates the 'meta' db table. One instance of this is serialized to the "meta" db on
/// first run (see Storage::startup / saveMeta_impl) and read back and checked on every
/// subsequent startup; any field mismatch means the datadir is incompatible.
struct Meta {
    uint32_t magic = 0xf33db33f, version = 0x1; ///< compared against the stored values at startup
    QString chain; ///< "test", "main", etc
    uint16_t platformBits = sizeof(long)*8U; ///< we save the platform wordsize to the db
};
// some database keys we use -- todo: if this grows large, move it elsewhere
static const bool falseMem = false, trueMem = true;
static const rocksdb::Slice kMeta{"meta"}, kDirty{"dirty"}, kUtxoCount{"utxo_count"},
                            kTrue(reinterpret_cast<const char *>(&trueMem), sizeof(trueMem)),
                            // fix: was sizeof(trueMem) (copy-paste slip). Same byte count for
                            // bool either way, but each slice should reference its own object.
                            kFalse(reinterpret_cast<const char *>(&falseMem), sizeof(falseMem));
// serialize/deser -- for basic types we use QDataStream, but we also have specializations at the end of this file
template <typename Type>
QByteArray Serialize(const Type & n) {
    if constexpr (std::is_base_of_v<QByteArray, Type>) {
        // QByteArray (and subclasses): the bytes themselves are already the serialized form
        return n;
    } else {
        QByteArray serialized;
        QDataStream ds(&serialized, QIODevice::WriteOnly|QIODevice::Truncate);
        ds << n;
        return serialized;
    }
}
template <typename Type>
Type Deserialize(const QByteArray &ba, bool *ok = nullptr) {
Type ret{};
if constexpr (std::is_base_of_v<QByteArray, Type>) {
ret = ba;
} else {
QDataStream ds(ba);
ds >> ret;
if (ok)
*ok = ds.status() == QDataStream::Status::Ok;
}
return ret;
}
/// Return a shallow, temporary copy of the memory of an object as a QByteArray. This reduces typing of
/// the boilerplate: "QByteArray::FromRawData(reinterpret_cast...." etc everywhere in this file.
/// Note: It is unsafe to use this function for anything other than obtaining a weak reference to the memory of an
/// object as a QByteArray for temporary purposes. The original object must live at least as long as this returned
/// QByteArray. Note that even copy-constructing a new QByteArray from this returned QByteArray will lead to
/// dangling pointers. See: https://doc.qt.io/qt-5/qbytearray.html#fromRawData.
/// (The enable_if guard rejects pointer Object types so callers can't accidentally wrap a pointer's own bytes.)
template <typename Object,
          std::enable_if_t<!std::is_pointer_v<std::remove_cv_t<Object>>, int> = 0>
QByteArray ShallowTmp(const Object *mem, size_t size = sizeof(Object)) {
    return QByteArray::fromRawData(reinterpret_cast<const char *>(mem), int(size));
}
/// Construct a QByteArray from a deep copy of any object's memory area. Slower than ShallowTmp above but 100% safe
/// to use after the original object expires since the returned QByteArray takes ownership of its private copy of
/// the memory it allocated.
template <typename Object,
          std::enable_if_t<!std::is_pointer_v<std::remove_cv_t<Object>>, int> = 0>
QByteArray DeepCpy(const Object *mem, size_t size = sizeof(Object)) {
    // QByteArray(const char *, int) copies the bytes into freshly owned storage.
    return QByteArray(reinterpret_cast<const char *>(mem), int(size));
}
/// Serialize a simple value such as an int directly, without using the space overhead that QDataStream imposes.
/// This is less safe but is more compact since the bytes of the passed-in value are written directly to the
/// returned QByteArray, without any encapsulation. Note that use of this mechanism makes all data in the database
/// no longer platform-neutral, which is ok. The presumption is users can re-synch their DB if switching
/// architectures.
template <typename Scalar,
          std::enable_if_t<std::is_scalar_v<Scalar> && !std::is_pointer_v<Scalar>, int> = 0>
QByteArray SerializeScalar (const Scalar & s) { return DeepCpy(&s); } // deep copy: safe to keep
template <typename Scalar,
          std::enable_if_t<std::is_scalar_v<Scalar> && !std::is_pointer_v<Scalar>, int> = 0>
QByteArray SerializeScalarNoCopy (const Scalar &s) { return ShallowTmp(&s); } // weak ref: 's' must outlive the result
/// Inverse of above. Pass in an optional 'pos' pointer if you wish to continue reading raw scalars from the same
/// QByteArray during subsequent calls to this template function. *ok, if specified, is set to false if we ran off
/// the QByteArray's bounds, and a default-constructed value of 'Scalar' is returned. No other safety checking is
/// done. On successful deserialization of the scalar, *pos (if specified) is updated to point just past the
/// last byte of the successfully converted item. On failure, *pos is always set to point past the end of the
/// QByteArray.
template <typename Scalar,
          std::enable_if_t<std::is_scalar_v<Scalar> && !std::is_pointer_v<Scalar>, int> = 0>
Scalar DeserializeScalar(const QByteArray &ba, bool *ok = nullptr, int *pos_out = nullptr) {
    Scalar ret{};
    int dummy = 0;
    int & pos = pos_out ? *pos_out : dummy; // alias either the caller's cursor or a local throwaway
    if (pos >= 0 && pos + int(sizeof(ret)) <= ba.size()) {
        if (ok) *ok = true;
        // Fix: read via memcpy rather than dereferencing a reinterpret_cast'ed pointer.
        // ba.data() + pos is not guaranteed to be suitably aligned for Scalar, and an
        // unaligned dereference is undefined behavior (and faults on some architectures).
        // memcpy compiles to the same single load on platforms where the access is safe.
        std::memcpy(&ret, ba.data() + pos, sizeof(ret));
        pos += int(sizeof(ret));
    } else {
        if (ok) *ok = false;
        pos = ba.size(); // failure: park the cursor past the end, per the contract above
    }
    return ret;
}
// specializations of Serialize/Deserialize for our custom types -- all are defined at the end of this file
template <> QByteArray Serialize(const Meta &);
template <> Meta Deserialize(const QByteArray &, bool *);
template <> QByteArray Serialize(const TXO &);
template <> TXO Deserialize(const QByteArray &, bool *);
template <> QByteArray Serialize(const TXOInfo &);
template <> TXOInfo Deserialize(const QByteArray &, bool *);
template <> [[maybe_unused]] QByteArray Serialize(const bitcoin::Amount &);
template <> bitcoin::Amount Deserialize(const QByteArray &, bool *);
// TxNumVec
using TxNumVec = std::vector<TxNum>;
// this serializes a vector of TxNums to a compact representation (6 bytes, eg 48 bits per TxNum), in little endian byte order
template <> QByteArray Serialize(const TxNumVec &);
// this deserializes a vector of TxNums from a compact representation (6 bytes, eg 48 bits per TxNum), assuming little endian byte order
template <> TxNumVec Deserialize(const QByteArray &, bool *);
// CompactTXO -- not currently used since we prefer toBytes() directly (TODO: remove if we end up never using this)
template <> QByteArray Serialize(const CompactTXO &);
template <> CompactTXO Deserialize(const QByteArray &, bool *);
/// NOTE: The slice should live as long as the returned QByteArray does. The QByteArray is a weak pointer into the slice!
/// (Implemented via ShallowTmp: no bytes are copied.)
inline QByteArray FromSlice(const rocksdb::Slice &s) { return ShallowTmp(s.data(), s.size()); }
/// Generic conversion from any type we operate on to a rocksdb::Slice. Note that the type in question should have
/// a conversion function written (eg Serialize) if it is anything other than a QByteArray or a scalar.
/// Return type varies by branch (const Slice &, Slice, or the BagOfHolding wrapper below) -- callers pass the
/// result straight to rocksdb APIs taking a `const Slice &`, which all three forms satisfy.
template<bool safeScalar=false, typename Thing>
auto ToSlice(const Thing &thing) {
    if constexpr (std::is_base_of_v<rocksdb::Slice, Thing>) {
        // same type, no-op, return ref to thing (const Slice &)
        return static_cast<const rocksdb::Slice &>(thing);
    } else if constexpr (std::is_base_of_v<QByteArray, Thing>) {
        // QByteArray conversion, return reference to data in QByteArray
        return rocksdb::Slice(thing.constData(), size_t(thing.size()));
    } else if constexpr (!safeScalar && std::is_scalar_v<Thing> && !std::is_pointer_v<Thing>) {
        return rocksdb::Slice(reinterpret_cast<const char *>(&thing), sizeof(thing)); // returned slice points to raw scalar memory itself
    } else {
        // the purpose of this holder is to keep the temporary QByteArray alive for as long as the slice itself is alive
        struct BagOfHolding {
            QByteArray bytes;
            rocksdb::Slice slice;
            operator const rocksdb::Slice &() const { return slice; }
        } h { Serialize(thing), ToSlice(h.bytes) };
        // NOTE: `h.bytes` is referenced inside h's own aggregate initializer -- this is well-defined
        // because aggregate members are initialized in declaration order (bytes before slice).
        return h; // this holder type "acts like" a Slice due to its operator const Slice &()
    }
};
/// Helper to get db name (basename of path) -- used to build the error messages thrown below
QString DBName(const rocksdb::DB *db) { return QFileInfo(QString::fromStdString(db->GetName())).baseName(); }
/// Helper to just get the status error string as a QString
QString StatusString(const rocksdb::Status & status) { return QString::fromStdString(status.ToString()); }
/// DB read/write helpers
/// NOTE: these may throw DatabaseError
/// If missingOk=false, then the returned optional is guaranteed to have a value if this function returns without throwing.
/// If missingOk=true, then if there was no other database error and the key was not found, the returned optional !has_value()
///
/// Template arg "safeScalar", if true, will deserialize scalar int, float, etc data using the Deserialize<>
/// function (uses QDataStream, is platform neutral, but is slightly slower). If false, we will use the
/// DeserializeScalar<> fast function for scalars such as ints. It's important to read from the DB in the same
/// 'safeScalar' mode as was written!
template <typename RetType, bool safeScalar = false, typename KeyType>
std::optional<RetType> GenericDBGet(rocksdb::DB *db, const KeyType & keyIn, bool missingOk = false,
                                    const QString & errorMsgPrefix = QString(), ///< used to specify a custom error message in the thrown exception
                                    bool acceptExtraBytesAtEndOfData = false, ///< if true, we are ok with extra unparsed bytes in data. otherwise we throw. (this check is only done for !safeScalar mode on basic types)
                                    const rocksdb::ReadOptions & ropts = rocksdb::ReadOptions())
{
    rocksdb::PinnableSlice datum; // lets rocksdb hand us a zero-copy view of the value where possible
    std::optional<RetType> ret;
    if (UNLIKELY(!db)) throw InternalError("GenericDBGet was passed a null pointer!");
    const auto status = db->Get(ropts, db->DefaultColumnFamily(), ToSlice<safeScalar>(keyIn), &datum);
    if (status.IsNotFound()) {
        if (missingOk)
            return ret; // optional will not has_value() to indicate missing key
        throw DatabaseKeyNotFound(QString("%1: %2")
                                      .arg(!errorMsgPrefix.isEmpty() ? errorMsgPrefix : QString("Key not found in db %1").arg(DBName(db)))
                                      .arg(StatusString(status)));
    } else if (!status.ok()) {
        throw DatabaseError(QString("%1: %2")
                                .arg(!errorMsgPrefix.isEmpty() ? errorMsgPrefix : QString("Error reading a key from db %1").arg(DBName(db)))
                                .arg(StatusString(status)));
    } else {
        // ok status -- dispatch on RetType at compile time to pick the right deserializer
        if constexpr (std::is_base_of_v<QByteArray, std::remove_cv_t<RetType> >) {
            static_assert (!safeScalar, "safeScalar=true mode is not supported for QByteArrays (it only is useful for scalar types)" );
            // special compile-time case for QByteArray subclasses -- return a deep copy of the data bytes directly.
            // TODO: figure out a way to do this without the 1 extra copy! (PinnableSlice -> ret).
            ret.emplace( reinterpret_cast<const char *>(datum.data()), QByteArray::size_type(datum.size()) );
        } else if constexpr (!safeScalar && std::is_scalar_v<RetType> && !std::is_pointer_v<RetType>) {
            if (!acceptExtraBytesAtEndOfData && datum.size() > sizeof(RetType)) {
                // reject extra stuff at end of data stream
                throw DatabaseFormatError(QString("%1: Extra bytes at the end of data")
                                              .arg(!errorMsgPrefix.isEmpty() ? errorMsgPrefix : QString("Database format error in db %1").arg(DBName(db))));
            }
            bool ok;
            ret.emplace( DeserializeScalar<RetType>(FromSlice(datum), &ok) ); // fast raw-bytes path
            if (!ok) {
                throw DatabaseSerializationError(
                    QString("%1: Key was retrieved ok, but data could not be deserialized as a scalar '%2'")
                        .arg(!errorMsgPrefix.isEmpty() ? errorMsgPrefix : QString("Error deserializing a scalar from db %1").arg(DBName(db)))
                        .arg(typeid (RetType).name()));
            }
        } else {
            if (UNLIKELY(acceptExtraBytesAtEndOfData))
                Debug() << "Warning: Caller misuse of function '" << __FUNCTION__
                        << "'. 'acceptExtraBytesAtEndOfData=true' is ignored when deserializing using QDataStream.";
            bool ok;
            ret.emplace( Deserialize<RetType>(FromSlice(datum), &ok) ); // QDataStream (or specialization) path
            if (!ok) {
                throw DatabaseSerializationError(
                    QString("%1: Key was retrieved ok, but data could not be deserialized")
                        .arg(!errorMsgPrefix.isEmpty() ? errorMsgPrefix : QString("Error deserializing an object from db %1").arg(DBName(db))));
            }
        }
    }
    return ret;
}
/// Convenience for above with the missingOk flag set to false. Will always throw or return a real value.
template <typename RetType, bool safeScalar = false, typename KeyType>
RetType GenericDBGetFailIfMissing(rocksdb::DB * db, const KeyType &k, const QString &errMsgPrefix = QString(), bool extraDataOk = false,
                                  const rocksdb::ReadOptions & ropts = rocksdb::ReadOptions())
{
    return GenericDBGet<RetType, safeScalar>(db, k, false, errMsgPrefix, extraDataOk, ropts).value();
}
/// Throws DatabaseError on any failure. On success, the key/value pair has been written to db.
template <bool safeScalar = false, typename KeyType, typename ValueType>
void GenericDBPut
            (rocksdb::DB *db, const KeyType & key, const ValueType & value,
             const QString & errorMsgPrefix = QString(), ///< used to specify a custom error message in the thrown exception
             const rocksdb::WriteOptions & opts = rocksdb::WriteOptions())
{
    const auto status = db->Put(opts, ToSlice<safeScalar>(key), ToSlice<safeScalar>(value));
    if (status.ok())
        return; // common case: success
    const QString prefix = !errorMsgPrefix.isEmpty() ? errorMsgPrefix
                                                     : QString("Error writing to db %1").arg(DBName(db));
    throw DatabaseError(QString("%1: %2").arg(prefix).arg(StatusString(status)));
}
/// Throws DatabaseError on any failure. On success, a Put has been enqueued onto the batch.
template <bool safeScalar = false, typename KeyType, typename ValueType>
void GenericBatchPut
            (rocksdb::WriteBatch & batch, const KeyType & key, const ValueType & value,
             const QString & errorMsgPrefix = QString()) ///< used to specify a custom error message in the thrown exception
{
    const auto status = batch.Put(ToSlice<safeScalar>(key), ToSlice<safeScalar>(value));
    if (status.ok())
        return;
    const QString prefix = !errorMsgPrefix.isEmpty() ? errorMsgPrefix : QString("Error from WriteBatch::Put");
    throw DatabaseError(QString("%1: %2").arg(prefix).arg(StatusString(status)));
}
/// Throws DatabaseError on any failure. On success, a Delete has been enqueued onto the batch.
template <bool safeScalar = false, typename KeyType>
void GenericBatchDelete
            (rocksdb::WriteBatch & batch, const KeyType & key,
             const QString & errorMsgPrefix = QString()) ///< used to specify a custom error message in the thrown exception
{
    const auto status = batch.Delete(ToSlice<safeScalar>(key));
    if (status.ok())
        return;
    const QString prefix = !errorMsgPrefix.isEmpty() ? errorMsgPrefix : QString("Error from WriteBatch::Delete");
    throw DatabaseError(QString("%1: %2").arg(prefix).arg(StatusString(status)));
}
/// A convenient wrapper to db->Write(batch...) which throws DatabaseError on all errors.
void GenericBatchWrite(rocksdb::DB *db, rocksdb::WriteBatch & batch,
                       const QString & errorMsgPrefix = QString(),
                       const rocksdb::WriteOptions & opts = rocksdb::WriteOptions())
{
    const auto status = db->Write(opts, &batch);
    if (status.ok())
        return;
    const QString prefix = !errorMsgPrefix.isEmpty() ? errorMsgPrefix
                                                     : QString("Error writing batch to db %1").arg(DBName(db));
    throw DatabaseError(QString("%1: %2").arg(prefix).arg(StatusString(status)));
}
/// Throws DatabaseError on any failure. Otherwise deletes a key from db.
/// It is not an error to delete a non-existing key.
template <bool safeScalar = false, typename KeyType>
void GenericDBDelete
            (rocksdb::DB *db, const KeyType & key,
             const QString & errorMsgPrefix = QString(), ///< used to specify a custom error message in the thrown exception
             const rocksdb::WriteOptions & opts = rocksdb::WriteOptions())
{
    const auto status = db->Delete(opts, ToSlice<safeScalar>(key));
    if (status.ok())
        return;
    const QString prefix = !errorMsgPrefix.isEmpty() ? errorMsgPrefix
                                                     : QString("Error deleting a key from db %1").arg(DBName(db));
    throw DatabaseError(QString("%1: %2").arg(prefix).arg(StatusString(status)));
}
//// A helper data struct -- written to the blkinfo table. This helps localize a txnum to a specific position in
/// a block. The table is keyed off of block_height(uint32_t) -> serialized BlkInfo (raw bytes)
struct BlkInfo {
    TxNum txNum0 = 0;  ///< TxNum of the block's first tx (see blkInfosByTxNum in Storage::Pvt)
    unsigned nTx = 0;  ///< transaction count for the block
    BlkInfo() = default;
    BlkInfo(const BlkInfo &) = default;
    BlkInfo &operator=(const BlkInfo &) = default;
    [[maybe_unused]] BlkInfo (TxNum txn, unsigned ntx) : txNum0(txn), nTx(ntx) {}
    bool operator==(const BlkInfo &other) const { return txNum0 == other.txNum0 && nTx == other.nTx; }
    bool operator!=(const BlkInfo &other) const { return !(*this == other); }
    /// Orders by txNum0, with nTx breaking ties.
    [[maybe_unused]] bool operator<(const BlkInfo &other) const {
        if (txNum0 != other.txNum0) return txNum0 < other.txNum0;
        return nTx < other.nTx;
    }
};
// serializes as raw bytes from struct (definition at the end of this file)
template <> QByteArray Serialize(const BlkInfo &);
// deserializes as raw bytes from struct (definition at the end of this file)
template <> BlkInfo Deserialize(const QByteArray &, bool *);
/// Block rewind/undo information. One of these is kept around in the db for the last 10 blocks.
/// It basically stores a record of all the UTXO's added and removed, as well as the set of
/// scripthashes.
struct UndoInfo {
    using ScriptHashSet = std::unordered_set<HashX, HashHasher>;
    using UTXOAddUndo = std::tuple<TXO, HashX, CompactTXO>;
    using UTXODelUndo = std::tuple<TXO, TXOInfo>;
    BlockHeight height = 0; ///< we save a copy of this information as a sanity check
    BlockHash hash; ///< we save a copy of this information as a sanity check. (bytes are in "reversed", bitcoind ToHex()-style memory order)
    BlkInfo blkInfo; ///< we save a copy of this from the global value for convenience and as a sanity check.
    // below is the actual critical undo information
    ScriptHashSet scriptHashes;
    std::vector<UTXOAddUndo> addUndos; ///< UTXOs created by the block (to be removed on undo)
    std::vector<UTXODelUndo> delUndos; ///< UTXOs spent by the block (to be restored on undo)
    [[maybe_unused]] QString toDebugString() const;
    [[maybe_unused]] bool operator==(const UndoInfo &) const; // for debug ser/deser
    bool isValid() const { return hash.size() == HashLen; } ///< cheap, imperfect check for validity
    /// Resets all fields to their default-constructed state.
    void clear() { height = 0; hash.clear(); blkInfo = BlkInfo(); scriptHashes.clear(); addUndos.clear(); delUndos.clear(); }
};
/// Renders a one-line human-readable summary of this undo record (debug aid).
QString UndoInfo::toDebugString() const {
    QString out;
    {   // scope the stream so it is flushed into `out` before we return
        QTextStream stream(&out);
        stream << "<Undo info for height: " << height << " addUndos: " << addUndos.size()
               << " delUndos: " << delUndos.size() << " scriptHashes: " << scriptHashes.size()
               << " nTx: " << blkInfo.nTx << " txNum0: " << blkInfo.txNum0
               << " hash: " << hash.toHex() << ">";
    }
    return out;
}
/// Member-wise equality (used for debug ser/deser round-trip checks).
bool UndoInfo::operator==(const UndoInfo &o) const {
    return std::tie(height, hash, blkInfo, scriptHashes, addUndos, delUndos)
        == std::tie(o.height, o.hash, o.blkInfo, o.scriptHashes, o.addUndos, o.delUndos);
}
// serialize as raw bytes mostly (no QDataStream)
template <> QByteArray Serialize(const UndoInfo &);
// deserialize from raw bytes mostly (no QDataStream)
template <> UndoInfo Deserialize(const QByteArray &, bool *);
/// Associative merge operator used for scripthash history concatenation
/// TODO: this needs to be made more efficient by implementing the real MergeOperator interface and combining
/// appends efficiently to reduce allocations. Right now it's called for each append.
class ConcatOperator : public rocksdb::AssociativeMergeOperator {
public:
    ~ConcatOperator() override;
    /// Tally of Merge() invocations; reported via Storage::stats() ("merge calls").
    mutable std::atomic<unsigned> merges = 0;
    // Gives the client a way to express the read -> modify -> write semantics
    // key: (IN) The key that's associated with this merge operation.
    // existing_value:(IN) null indicates the key does not exist before this op
    // value: (IN) the value to update/merge the existing_value with
    // new_value: (OUT) Client is responsible for filling the merge result
    // here. The string that new_value is pointing to will be empty.
    // logger: (IN) Client could use this to log errors during merge.
    //
    // Return true on success.
    // All values passed in will be client-specific values. So if this method
    // returns false, it is because client specified bad data or there was
    // internal corruption. The client should assume that this will be treated
    // as an error by the library.
    bool Merge(const rocksdb::Slice& key, const rocksdb::Slice* existing_value,
               const rocksdb::Slice& value, std::string* new_value,
               rocksdb::Logger* logger) const override;
    const char* Name() const override { return "ConcatOperator"; /* NOTE: this must be the same for the same db each time it is opened! */ }
};
ConcatOperator::~ConcatOperator() {} // weak vtable warning prevention
/// Merge = plain concatenation: *new_value becomes [existing_value bytes][value bytes].
bool ConcatOperator::Merge(const rocksdb::Slice& key, const rocksdb::Slice* existing_value,
                           const rocksdb::Slice& value, std::string* new_value, rocksdb::Logger* logger) const
{
    (void)key; (void)logger; // unused; required by the interface
    ++merges; // stats counter only
    if (existing_value)
        new_value->assign(existing_value->data(), existing_value->size());
    else
        new_value->clear();
    new_value->append(value.data(), value.size());
    return true;
}
}
/// Private data for Storage (pimpl). Holds all databases, locks, caches and in-memory indices.
struct Storage::Pvt
{
    /* NOTE: If taking multiple locks, all locks should be taken in the order they are declared, to avoid deadlocks. */
    constexpr int blockHeaderSize() { return BTC::GetBlockHeaderSize(); }
    Meta meta;      ///< the in-memory copy of the 'meta' table (checked/written at startup)
    Lock metaLock;  ///< guards `meta`
    std::atomic<std::underlying_type_t<SaveItem>> pendingSaves{0}; ///< flag bits of SaveItem values -- presumably consumed by the save machinery elsewhere in this file; TODO confirm
    struct RocksDBs {
        const rocksdb::ReadOptions defReadOpts; ///< avoid creating this each time
        const rocksdb::WriteOptions defWriteOpts; ///< avoid creating this each time
        rocksdb::Options opts, shistOpts; ///< shistOpts additionally carries the merge operator (set up in startup())
        std::shared_ptr<ConcatOperator> concatOperator;
        std::unique_ptr<rocksdb::DB> meta, blkinfo, utxoset,
                                     shist, shunspent, // scripthash_history and scripthash_unspent
                                     undo; // undo (reorg rewind)
    } db;
    std::unique_ptr<RecordFile> txNumsFile;
    std::unique_ptr<RecordFile> headersFile;
    /// Big lock used for block/history updates. Public methods that read the history such as getHistory and listUnspent
    /// take this as read-only (shared), and addBlock and undoLatestBlock take this as read/write (exclusively).
    /// This is intended to be a coarse lock. Currently the update code takes this along with headerVerifierLock and
    /// blkInfoLock at the same time, so it's (as of now) equivalent to either of those two locks.
    /// TODO: See about removing all the other locks and keeping one general RWLock for all updates?
    mutable RWLock blocksLock;
    BTC::HeaderVerifier headerVerifier;
    mutable RWLock headerVerifierLock; ///< guards headerVerifier (and genesisHash, see below)
    std::atomic<TxNum> txNumNext{0};
    std::vector<BlkInfo> blkInfos;
    std::map<TxNum, unsigned> blkInfosByTxNum; ///< ordered map of TxNum0 for a block -> index into above blkInfo array
    RWLock blkInfoLock; ///< locks blkInfos and blkInfosByTxNum
    std::atomic<int64_t> utxoCt = 0;
    std::atomic<uint32_t> earliestUndoHeight = UINT32_MAX; ///< the purpose of this is to control when we issue "delete" commands to the db for deleting expired undo infos from the undo db
    /// This cache is anticipated to see heavy use for get_history, so we may wish to make it larger. MAKE THIS CONFIGURABLE.
    static constexpr size_t kMaxNum2HashMemoryBytes = 100*1000*1000; ///< 100MiB max cache
    CostCache<TxNum, TxHash> lruNum2Hash{kMaxNum2HashMemoryBytes};
    /// Approximate memory cost of `nItems` entries in lruNum2Hash (overhead + hash bytes).
    unsigned constexpr lruNum2HashSizeCalc(unsigned nItems = 1) {
        return decltype(lruNum2Hash)::itemOverheadBytes() + (nItems * HashLen);
    }
    static constexpr size_t kMaxHeight2HashesMemoryBytes = 100*1000*1000; // 100 MiB max cache
    /// Cache BlockHeight -> vector of txHashes for the block (in bitcoind memory order). This gets cleared by
    /// undoLatestBlock. This is used by the txHashesForBlock function only (which is used by get_merkle and
    /// id_from_pos in the protocol). TODO: MAKE THIS CACHE SIZE CONFIGURABLE.
    /// Also TODO: Maybe use a QCache here and/or use a memcost-based limit. Right now we can't predict the memory
    /// cost using this size-based cache limit provided by LRU::Cache.
    CostCache<BlockHeight, QVector<TxHash>> lruHeight2Hashes_BitcoindMemOrder { kMaxHeight2HashesMemoryBytes };
    /// returns the cost for a particular cache item based on the number of hashes in the vector
    unsigned constexpr lruHeight2HashSizeCalc(size_t nHashes) {
        // each cache item with nHashes takes roughly this much memory
        return unsigned( (nHashes * (HashLen + sizeof(TxHash))) + decltype(lruHeight2Hashes_BitcoindMemOrder)::itemOverheadBytes() );
    }
    /// this object is thread safe, but it needs to be initialized with headers before allowing client connections.
    std::unique_ptr<Merkle::Cache> merkleCache;
    HeaderHash genesisHash; // written-to once by either loadHeaders code or addBlock for block 0. Guarded by headerVerifierLock.
    Mempool mempool; ///< app-wide mempool data -- does not get saved to db. Controller.cpp writes to this
    Mempool::FeeHistogramVec mempoolFeeHistogram; ///< refreshed periodically by refreshMempoolHistogram()
    RWLock mempoolLock; ///< guards mempool and mempoolFeeHistogram
};
/// Constructs the Storage manager: creates the SubsMgr and the private Pvt data.
/// No databases are opened here -- that happens later, in startup().
Storage::Storage(const std::shared_ptr<Options> & options_)
    : Mgr(nullptr), options(options_), subsmgr(new SubsMgr(options, this)), p(std::make_unique<Pvt>())
{
    setObjectName("Storage");
    _thread.setObjectName(objectName()); // our worker thread shares this object's name (aids debugging/logging)
}
Storage::~Storage() { Debug() << __func__; cleanup(); } // cleanup() stops our thread and shuts down the subs mgr
void Storage::startup()
{
Log() << "Loading database ...";
if (UNLIKELY(!subsmgr || !options))
throw BadArgs("Storage instance constructed with nullptr for `options` and/or `subsmgr` -- FIXME!");
subsmgr->startup(); // trivial, always succeeds if constructed correctly
{
// set up the merkle cache object
using namespace std::placeholders;
p->merkleCache = std::make_unique<Merkle::Cache>(std::bind(&Storage::merkleCacheHelperFunc, this, _1, _2, _3));
}
{ // open all db's ...
rocksdb::Options & opts(p->db.opts), &shistOpts(p->db.shistOpts);
// Optimize RocksDB. This is the easiest way to get RocksDB to perform well
opts.IncreaseParallelism(int(Util::getNPhysicalProcessors()));
opts.OptimizeLevelStyleCompaction();
// create the DB if it's not already present
opts.create_if_missing = true;
opts.error_if_exists = false;
//opts.max_open_files = 50; ///< testing -- seems this affects memory usage see: https://github.com/facebook/rocksdb/issues/4112
opts.keep_log_file_num = 5; // ??
opts.compression = rocksdb::CompressionType::kNoCompression; // for now we test without compression. TODO: characterize what is fastest and best..
shistOpts = opts; // copy what we just did
shistOpts.merge_operator = p->db.concatOperator = std::make_shared<ConcatOperator>(); // this set of options uses the concat merge operator (we use this to append to history entries in the db)
using DBInfoTup = std::tuple<QString, std::unique_ptr<rocksdb::DB> &, const rocksdb::Options &>;
const std::list<DBInfoTup> dbs2open = {
{ "meta", p->db.meta, opts },
{ "blkinfo" , p->db.blkinfo , opts },
{ "utxoset", p->db.utxoset, opts },
{ "scripthash_history", p->db.shist, shistOpts },
{ "scripthash_unspent", p->db.shunspent, opts },
{ "undo", p->db.undo, opts },
};
const auto OpenDB = [this](const DBInfoTup &tup) {
auto & [name, uptr, opts] = tup;
rocksdb::DB *db = nullptr;
rocksdb::Status s;
// try and open database
const QString path = options->datadir + QDir::separator() + name;
s = rocksdb::DB::Open( opts, path.toStdString(), &db);
if (!s.ok() || !db)
throw DatabaseError(QString("Error opening %1 database: %2 (path: %3)")
.arg(name).arg(StatusString(s)).arg(path));
uptr.reset(db);
};
// open all db's defined above
for (auto & tup : dbs2open)
OpenDB(tup);
} // /open db's
// load/check meta
{
Meta m_db;
static const QString errMsg{"Incompatible database format -- delete the datadir and resynch. RocksDB error"};
if (auto opt = GenericDBGet<Meta>(p->db.meta.get(), kMeta, true, errMsg);
opt.has_value())
{
m_db = opt.value();
if (m_db.magic != p->meta.magic || m_db.version != p->meta.version || m_db.platformBits != p->meta.platformBits) {
throw DatabaseFormatError(errMsg);
}
p->meta = m_db;
Debug () << "Read meta from db ok";
if (!p->meta.chain.isEmpty())
Log() << "Chain: " << p->meta.chain;
} else {
// ok, did not exist .. write a new one to db
saveMeta_impl();
}
if (isDirty()) {
throw DatabaseError("It appears that " APPNAME " was forcefully killed in the middle of committng a block to the db. "
"We cannot figure out where exactly in the update process " APPNAME " was killed, so we "
"cannot undo the inconsistent state caused by the unexpected shutdown. Sorry!"
"\n\nThe database has been corrupted. Please delete the datadir and resynch to bitcoind.\n");
}
}
// load headers -- may throw.. this must come first
loadCheckHeadersInDB();
// check txnums
loadCheckTxNumsFileAndBlkInfo();
// count utxos -- note this depends on "blkInfos" being filled in so it must be called after loadCheckTxNumsFileAndBlkInfo()
loadCheckUTXOsInDB();
// load check earliest undo to populate earliestUndoHeight
loadCheckEarliestUndo();
start(); // starts our thread
}
void Storage::cleanup()
{
    // Shut down in dependency order: first join our worker thread, then let the
    // subscriptions manager tear itself down.
    stop();
    if (subsmgr)
        subsmgr->cleanup();
    // TODO: unsaved/"dirty state" detection here -- and forced save, if needed.
}
auto Storage::stats() const -> Stats
{
    // TODO ... more stuff here, perhaps
    // Build a nested QVariantMap snapshot of cache/merge statistics for the admin/stats endpoint.
    QVariantMap ret;
    const auto & concat = p->db.concatOperator;
    ret["merge calls"] = concat ? concat->merges.load() : QVariant();
    QVariantMap caches;
    {
        // txnum -> txhash LRU cache
        QVariantMap entry;
        entry["nItems"] = qlonglong(p->lruNum2Hash.size());
        entry["Size bytes"] = qlonglong(p->lruNum2Hash.totalCost());
        caches["LRU Cache: TxNum -> TxHash"] = entry;
    }
    {
        // block height -> tx hashes LRU cache (hashes kept in bitcoind memory order)
        QVariantMap entry;
        entry["nBlocks"] = unsigned(p->lruHeight2Hashes_BitcoindMemOrder.size());
        entry["Size bytes"] = unsigned(p->lruHeight2Hashes_BitcoindMemOrder.totalCost());
        caches["LRU Cache: Block Height -> TxHashes"] = entry;
    }
    {
        // merkle header cache; size estimate assumes HashLen + sizeof(HeaderHash) bytes per entry
        const size_t nHashes = p->merkleCache->size();
        caches["merkleHeaders_Size"] = qulonglong(nHashes);
        caches["merkleHeaders_SizeBytes"] = qulonglong(nHashes * (HashLen + sizeof(HeaderHash)));
    }
    ret["caches"] = caches;
    return ret;
}
// Keep returned LockGuard in scope while you use the HeaderVerifier
// Returns the shared HeaderVerifier by reference, paired with an exclusive lock guard
// constructed in-place on p->headerVerifierLock. The reference is only safe to use
// while the returned guard is alive -- callers must keep the whole pair in scope.
auto Storage::headerVerifier() -> std::pair<BTC::HeaderVerifier &, ExclusiveLockGuard>
{
return std::pair<BTC::HeaderVerifier &, ExclusiveLockGuard>( p->headerVerifier, p->headerVerifierLock );
}
// Const overload: read-only access to the HeaderVerifier under a shared (reader) lock.
// Same lifetime rule as the non-const overload: keep the returned guard in scope
// for as long as the verifier reference is used.
auto Storage::headerVerifier() const -> std::pair<const BTC::HeaderVerifier &, SharedLockGuard>
{
return std::pair<const BTC::HeaderVerifier &, SharedLockGuard>( p->headerVerifier, p->headerVerifierLock );
}
QString Storage::getChain() const
{
    // read the chain name from the meta struct under its lock
    LockGuard lock(p->metaLock);
    return p->meta.chain;
}
void Storage::setChain(const QString &chain)
{
    {
        // update the in-memory copy under the meta lock; release before logging/saving
        LockGuard lock(p->metaLock);
        p->meta.chain = chain;
    }
    Log() << "Chain: " << chain;
    save(SaveItem::Meta); // enqueue an async write of the meta record to the db
}
/// returns the "next" TxNum
TxNum Storage::getTxNum() const
{
    // lock-free atomic read
    return p->txNumNext.load();
}
auto Storage::latestTip(Header *hdrOut) const -> std::pair<int, HeaderHash> {
    // Grab (height, raw header) of the last header processed; the shared lock is
    // only held for the duration of the inner scope.
    std::pair<int, HeaderHash> ret;
    {
        const auto [verif, lock] = headerVerifier();
        ret = verif.lastHeaderProcessed();
    }
    if (hdrOut) *hdrOut = ret.second; // this is not a hash but the actual block header
    const bool haveTip = ret.first >= 0 && !ret.second.isEmpty();
    if (!haveTip) {
        // no tip known yet -- normalize to (-1, empty)
        ret.first = -1;
        ret.second.clear();
        if (hdrOut) hdrOut->clear();
    } else {
        // ret.second currently holds the raw header; callers expect its hash
        ret.second = BTC::HashRev(ret.second);
    }
    return ret;
}
void Storage::save(SaveSpec typed_spec)
{
    using IntType = decltype(p->pendingSaves.load());
    // fetch_or returns the *previous* flag set; a nonzero previous value means a
    // save_impl() call is already queued on the event loop, so only enqueue when
    // the flags were previously clear.
    const auto spec = IntType(typed_spec);
    if (!p->pendingSaves.fetch_or(spec))
        QTimer::singleShot(0, this, [this]{ save_impl(); });
}
void Storage::save_impl(SaveSpec override)
{
    // Atomically grab-and-clear the pending flags, OR-ing in any explicitly forced spec.
    const auto flags = SaveSpec(p->pendingSaves.exchange(0)) | override;
    if (!flags)
        return; // nothing pending and nothing forced
    try {
        if (flags & SaveItem::Meta) { // Meta record requested
            LockGuard l(p->metaLock);
            saveMeta_impl();
        }
    } catch (const std::exception & e) {
        Fatal() << e.what(); // will abort app...
    }
}
void Storage::saveMeta_impl()
{
    // Caller is expected to hold p->metaLock. No-op if the meta db isn't open yet.
    if (!p->db.meta)
        return;
    const auto status = p->db.meta->Put(p->db.defWriteOpts, kMeta, ToSlice(Serialize(p->meta)));
    if (!status.ok())
        throw DatabaseError("Failed to write meta to db");
    Debug() << "Wrote new metadata to db";
}
void Storage::appendHeader(const Header &h, BlockHeight height)
{
    // Headers must be appended strictly in order: the next record index in the
    // flat file *is* the expected height.
    const auto expectedHeight = p->headersFile->numRecords();
    if (UNLIKELY(height != expectedHeight))
        throw InternalError(QString("Bad use of appendHeader -- expected height %1, got height %2").arg(expectedHeight).arg(height));
    QString err;
    const auto res = p->headersFile->appendRecord(h, true, &err);
    if (UNLIKELY(!err.isEmpty()))
        throw DatabaseError(QString("Failed to append header %1: %2").arg(height).arg(err));
    if (UNLIKELY(!res.has_value() || *res != height))
        throw DatabaseError(QString("Failed to append header %1: returned count is bad").arg(height));
}
void Storage::deleteHeadersPastHeight(BlockHeight height)
{
    // Truncates the headers flat file so that `height` is the last remaining record
    // (i.e. height+1 records remain). Used when undoing blocks on reorg.
    QString err;
    const auto res = p->headersFile->truncate(height + 1, &err);
    if (!err.isEmpty())
        throw DatabaseError(QString("Failed to truncate headers past height %1: %2").arg(height).arg(err));
    else if (res != height + 1)
        // fixed typo in error message: was "unexepected"
        throw InternalError("header truncate returned an unexpected value");
}
auto Storage::headerForHeight(BlockHeight height, QString *err) -> std::optional<Header>
{
    // Range-check against the current tip before hitting the headers file.
    std::optional<Header> ret;
    const int h = int(height);
    if (h <= latestTip().first && h >= 0)
        ret = headerForHeight_nolock(height, err);
    else if (err)
        *err = QString("Height %1 is out of range").arg(height);
    return ret;
}
auto Storage::headerForHeight_nolock(BlockHeight height, QString *err) -> std::optional<Header>
{
    // Reads one header record; on any failure (read error or exception) the optional
    // stays empty and *err (if provided) receives the failure text.
    std::optional<Header> ret;
    try {
        QString readErr;
        auto rec = p->headersFile->readRecord(height, &readErr);
        if (!readErr.isEmpty())
            throw DatabaseError(QString("failed to read header %1: %2").arg(height).arg(readErr));
        ret.emplace(std::move(rec));
    } catch (const std::exception &e) {
        if (err) *err = e.what();
    }
    return ret;
}
auto Storage::headersFromHeight_nolock_nocheck(BlockHeight height, unsigned num, QString *err) -> std::vector<Header>
{
    // Raw bulk read -- no range checking, no locking; callers are responsible for both.
    if (err) err->clear();
    auto ret = p->headersFile->readRecords(height, num, err);
    // a short read without an error from the file layer still counts as an error for callers
    if (ret.size() != num && err && err->isEmpty())
        *err = "short header count returned from headers file";
    ret.shrink_to_fit();
    return ret;
}
/// Convenient batched alias for above. Returns a set of headers starting at height. May return < count if not
/// all headers were found. Thread safe.
auto Storage::headersFromHeight(BlockHeight height, unsigned count, QString *err) -> std::vector<Header>
{
    std::vector<Header> ret;
    SharedLockGuard g(p->blocksLock); // to ensure clients get a consistent view
    // Clamp the requested count so we never try to read past the current tip.
    // Note latestTip() also takes a lock briefly, so we must call it after taking blocksLock above.
    const int num = std::min(1 + latestTip().first - int(height), int(count));
    if (num > 0) {
        // Bugfix: previously this passed `count`, ignoring the clamp computed above, which
        // caused spurious "short header count" errors when the range extended past the tip.
        ret = headersFromHeight_nolock_nocheck(height, unsigned(num), err);
    } else if (err) *err = "No headers in the specified range";
    return ret;
}
void Storage::loadCheckHeadersInDB()
{
    // Opens the headers flat file and verifies the entire stored header chain on startup:
    // each header must link to its predecessor (checked via the HeaderVerifier). Also sets
    // the genesis hash and seeds the merkle cache with the computed header hashes. May throw.
    assert(p->blockHeaderSize() > 0);
    p->headersFile = std::make_unique<RecordFile>(options->datadir + QDir::separator() + "headers", size_t(p->blockHeaderSize()), 0x00f026a1); // may throw
    Log() << "Verifying headers ...";
    uint32_t num = unsigned(p->headersFile->numRecords());
    std::vector<QByteArray> hVec;
    const auto t0 = Util::getTimeNS();
    {
        if (num > MAX_HEADERS)
            // fixed typo in error message: was "mistmatch"
            throw DatabaseFormatError(QString("Header count (%1) in database exceeds MAX_HEADERS! This is likely due to"
                                              " a database format mismatch. Delete the datadir and resynch it.")
                                      .arg(num));
        // verify headers: hashPrevBlock must match what we actually read from db
        if (num) {
            Debug() << "Verifying " << num << " " << Util::Pluralize("header", num) << " ...";
            QString err;
            hVec = headersFromHeight_nolock_nocheck(0, num, &err);
            if (!err.isEmpty() || hVec.size() != num)
                // fixed typo in error message: was "databaase"
                throw DatabaseFormatError(QString("%1. Possible database corruption. Delete the datadir and resynch.").arg(err.isEmpty() ? "Could not read all headers" : err));
            auto [verif, lock] = headerVerifier();
            // set genesis hash
            p->genesisHash = BTC::HashRev(hVec.front());
            err.clear();
            // feed each header through the verifier, then replace it in-place with its hash
            // (the hashes are what the merkle cache initialization below needs)
            for (uint32_t i = 0; i < num; ++i) {
                auto & bytes = hVec[i];
                if (!verif(bytes, &err))
                    throw DatabaseFormatError(QString("%1. Possible database corruption. Delete the datadir and resynch.").arg(err));
                bytes = BTC::Hash(bytes); // replace the header in the vector with its hash because it will be needed below...
            }
        }
    }
    if (num) {
        const auto elapsed = Util::getTimeNS();
        Debug() << "Read & verified " << num << " " << Util::Pluralize("header", num) << " from db in " << QString::number((elapsed-t0)/1e6, 'f', 3) << " msec";
    }
    if (!p->merkleCache->isInitialized() && !hVec.empty())
        p->merkleCache->initialize(hVec); // this may take a few seconds, and it may also throw
}
void Storage::loadCheckTxNumsFileAndBlkInfo()
{
// may throw.
p->txNumsFile = std::make_unique<RecordFile>(options->datadir + QDir::separator() + "txnum2txhash", HashLen, 0x000012e2);
p->txNumNext = p->txNumsFile->numRecords();
Debug() << "Read TxNumNext from file: " << p->txNumNext.load();
TxNum ct = 0;
if (const int height = latestTip().first; height >= 0)
{
p->blkInfos.reserve(std::min(size_t(height+1), MAX_HEADERS));
Log() << "Checking tx counts ...";
for (int i = 0; i <= height; ++i) {
static const QString errMsg("Failed to read a blkInfo from db, the database may be corrupted");
const auto blkInfo = GenericDBGetFailIfMissing<BlkInfo>(p->db.blkinfo.get(), uint32_t(i), errMsg, false, p->db.defReadOpts);
if (blkInfo.txNum0 != ct)
throw DatabaseFormatError(QString("BlkInfo for height %1 does not match computed txNum of %2."
"\n\nThe database may be corrupted. Delete the datadir and resynch it.\n")
.arg(i).arg(ct));
ct += blkInfo.nTx;
p->blkInfos.emplace_back(blkInfo);
p->blkInfosByTxNum[blkInfo.txNum0] = unsigned(p->blkInfos.size()-1);
}
Log() << ct << " total transactions";
}
if (ct != p->txNumNext) {
throw DatabaseFormatError(QString("BlkInfo txNums do not add up to expected value of %1 != %2."
"\n\nThe database may be corrupted. Delete the datadir and resynch it.\n")
.arg(ct).arg(p->txNumNext.load()));
}
}
// NOTE: this must be called *after* loadCheckTxNumsFileAndBlkInfo(), because it needs a valid p->txNumNext
void Storage::loadCheckUTXOsInDB()
{
FatalAssert(!!p->db.utxoset) << __FUNCTION__ << ": Utxo set db is not open";
if (options->doSlowDbChecks) {
Log() << "CheckDB: Verifying utxo set (this may take some time) ...";
const auto t0 = Util::getTimeNS();
{
const int currentHeight = latestTip().first;
std::unique_ptr<rocksdb::Iterator> iter(p->db.utxoset->NewIterator(p->db.defReadOpts));
if (!iter) throw DatabaseError("Unable to obtain an iterator to the utxo set db");
p->utxoCt = 0;
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
// TODO: the below checks may be too slow. See about removing them and just counting the iter.
const auto txo = Deserialize<TXO>(FromSlice(iter->key()));
if (!txo.isValid()) {
throw DatabaseSerializationError("Read an invalid txo from the utxo set database."
" This may be due to a database format mismatch."
"\n\nDelete the datadir and resynch to bitcoind.\n");
}
auto info = Deserialize<TXOInfo>(FromSlice(iter->value()));
if (!info.isValid())
throw DatabaseSerializationError(QString("Txo %1 has invalid metadata in the db."
" This may be due to a database format mismatch."
"\n\nDelete the datadir and resynch to bitcoind.\n")
.arg(txo.toString()));
// uncomment this to do a deep test: TODO: Make this configurable from the CLI -- this last check is very slow.
const CompactTXO ctxo = CompactTXO(info.txNum, txo.prevoutN);
const QByteArray shuKey = info.hashX + ctxo.toBytes();
static const QString errPrefix("Error reading scripthash_unspent");
QByteArray tmpBa;
if (bool fail1 = false, fail2 = false, fail3 = false, fail4 = false;
(fail1 = (info.confirmedHeight.has_value() && int(info.confirmedHeight.value()) > currentHeight))
|| (fail2 = info.txNum >= p->txNumNext)
|| (fail3 = (tmpBa = GenericDBGet<QByteArray>(p->db.shunspent.get(), shuKey, true, errPrefix, false, p->db.defReadOpts).value_or("")).isEmpty())
|| (fail4 = (info.amount != Deserialize<bitcoin::Amount>(tmpBa)))) {
// TODO: reorg? Inconsisent db? FIXME
QString msg;
{
QTextStream ts(&msg);
ts << "Inconsistent database: txo " << txo.toString() << " at height: "
<< info.confirmedHeight.value();
if (fail1) {
ts << " > current height: " << currentHeight << ".";
} else if (fail2) {
ts << ". TxNum: " << info.txNum << " >= " << p->txNumNext << ".";
} else if (fail3) {
ts << ". Failed to find ctxo " << ctxo.toString() << " in the scripthash_unspent db.";
} else if (fail4) {
ts << ". Utxo amount does not match the ctxo amount in the scripthash_unspent db.";
}
ts << "\n\nThe database has been corrupted. Please delete the datadir and resynch to bitcoind.\n";
}
throw DatabaseError(msg);
}
if (0 == ++p->utxoCt % 100000) {
*(0 == p->utxoCt % 2500000 ? std::make_unique<Log>() : std::make_unique<Debug>()) << "CheckDB: Verified " << p->utxoCt << " utxos ...";
}
}
if (const auto metact = readUtxoCtFromDB(); p->utxoCt != metact)
throw DatabaseError(QString("UTXO count in meta table (%1) does not match the actual number of UTXOs in the utxoset (%2)."
"\n\nThe database has been corrupted. Please delete the datadir and resynch to bitcoind.\n")
.arg(metact).arg(p->utxoCt.load()));
}
const auto elapsed = Util::getTimeNS();
Debug() << "CheckDB: Verified utxos in " << QString::number((elapsed-t0)/1e6, 'f', 3) << " msec";
} else {
p->utxoCt = readUtxoCtFromDB();
}
if (const auto ct = utxoSetSize(); ct)
Log() << "UTXO set: " << ct << Util::Pluralize(" utxo", ct)
<< ", " << QString::number(utxoSetSizeMiB(), 'f', 3) << " MiB";
}
void Storage::loadCheckEarliestUndo()
{
FatalAssert(!!p->db.undo) << __FUNCTION__ << ": Undo db is not open";
const auto t0 = Util::getTimeNS();
int ctr = 0;
{
std::unique_ptr<rocksdb::Iterator> iter(p->db.undo->NewIterator(p->db.defReadOpts));
if (!iter) throw DatabaseError("Unable to obtain an iterator to the undo db");
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
const auto keySlice = iter->key();
if (keySlice.size() != sizeof(uint32_t))
throw DatabaseFormatError("Unexpected key in undo database. We expect only 32-bit unsigned ints!");
const uint32_t height = DeserializeScalar<uint32_t>(FromSlice(keySlice));
if (height < p->earliestUndoHeight) p->earliestUndoHeight = height;