Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
4bd2bbc
Bump env and add xdr mismatch check back in
sisuresh May 22, 2023
7c51222
Cleanup
sisuresh May 23, 2023
f250f98
Correct initial network configs for contract cost
jayz22 May 22, 2023
a854d23
Implement refunds for the Soroban metadata fees.
dmkozh May 4, 2023
a55e49b
Reject txs that are too big before placing them into outbound queues
marta-lokhova May 22, 2023
8341b85
Fix test so config setting case doesn't loop forever
sisuresh May 19, 2023
ec0abb9
Fix test
sisuresh May 19, 2023
46c0f3c
Add guard
sisuresh May 20, 2023
5d2dd71
Add offline-close command for establishing test network state in quic…
graydon May 23, 2023
94c8e79
Make sed usage more compat with git refs
leighmcculloch May 23, 2023
e482f86
Avoid BUILD_TESTS code in offline-close, fix other nits
graydon May 24, 2023
f938e4e
Compiles with XDR changes
SirTyson May 4, 2023
f6d35ef
Added LIFETIME_EXTENSION merge behavior
SirTyson May 5, 2023
4d5c53a
Added BucketListDB lifetime extension tests
SirTyson May 16, 2023
03f9fcc
BucketListDB loads LIFETIME_EXTENSIONS properly
SirTyson May 16, 2023
be6d38f
Compiles with footprint changes
SirTyson May 16, 2023
5aa0859
Compiles with XDR rename
SirTyson May 17, 2023
d3a7e24
WIP bump via footprint
SirTyson May 18, 2023
006d99e
Only write LIFETIME_EXTENSION when lifetime changes
SirTyson May 18, 2023
214dc1f
Refactor type helpers and minor XDR updates
SirTyson May 24, 2023
ef55cbf
InvokeHostFunctionsTests mostly working
SirTyson May 25, 2023
4181106
Fixed bug with ContractData types and added unit test
SirTyson May 26, 2023
aebf564
Basic InvokeHostFunctionTests working with storage changes
SirTyson May 26, 2023
5b93a5b
Added ContractData type support to SQL with unit test
SirTyson May 26, 2023
f20795a
Compiles after rebase
SirTyson May 26, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 13 additions & 15 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions docs/software/commands.md
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,10 @@ Command options can only be placed after command.
HISTORY-LABEL. HISTORY-LABEL should be one of the history archives you have
specified in the stellar-core.cfg. This will write a
`.well-known/stellar-history.json` file in the archive root.
* **offline-close**: Forces stellar-core to close a specified number of empty
ledgers, strictly offline and starting from its current state, generating and
publishing history as it goes. Should only be used for special scenarios like
setting up test networks with artificial history.
* **offline-info**: Returns an output similar to `--c info` for an offline
instance, but written directly to standard output (ignoring log levels).
* **print-xdr <FILE-NAME>**: Pretty-print a binary file containing an XDR
Expand Down
2 changes: 1 addition & 1 deletion src/Makefile.am
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ main/StellarCoreVersion.cpp: always
@vers=$$(cd "$(srcdir)" \
&& git describe --always --dirty --tags 2>/dev/null \
|| echo "$(PACKAGE) $(VERSION)"); \
sed -e "s/%%VERSION%%/$$vers/" \
sed -e "s@%%VERSION%%@$$vers@" \
< "$(srcdir)/main/StellarCoreVersion.cpp.in" > $@~
@if cmp -s $@~ $@; then rm -f $@~; else \
mv -f $@~ $@ && printf "echo '%s' > $@\n" "$$(cat $@)"; fi
Expand Down
186 changes: 177 additions & 9 deletions src/bucket/Bucket.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
#include "crypto/SHA.h"
#include "database/Database.h"
#include "ledger/LedgerHashUtils.h"
#include "ledger/LedgerTypeUtils.h"
#include "main/Application.h"
#include "medida/timer.h"
#include "util/Fs.h"
Expand Down Expand Up @@ -181,9 +182,14 @@ Bucket::getBucketEntry(LedgerKey const& k)
// If we find the entry, we remove the found key from keys so that later buckets
// do not load shadowed entries. If we don't find the entry, we do not remove it
// from keys so that it will be searched for again at a lower level.
// lifetimeExtensions stores a map of LedgerKeys -> lifetime extensions that
// should be applied whenever the corresponding DATA_ENTRY is loaded. Note that
// the keys in this map correspond to DATA_ENTRY, not LIFETIME_EXTENSION
void
Bucket::loadKeys(std::set<LedgerKey, LedgerEntryIdCmp>& keys,
std::vector<LedgerEntry>& result)
Bucket::loadKeys(
std::set<LedgerKey, LedgerEntryIdCmp>& keys,
std::vector<LedgerEntry>& result,
std::map<LedgerKey, uint32_t, LedgerEntryIdCmp>& lifetimeExtensions)
{
auto currKeyIt = keys.begin();
auto const& index = getIndex();
Expand All @@ -200,7 +206,43 @@ Bucket::loadKeys(std::set<LedgerKey, LedgerEntryIdCmp>& keys,
{
if (entryOp->type() != DEADENTRY)
{
result.push_back(entryOp->liveEntry());

#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
if (isSorobanExtEntry(*currKeyIt))
{
auto k = *currKeyIt;
setLeType(k, ContractLedgerEntryType::DATA_ENTRY);
lifetimeExtensions.emplace(
k, getExpirationLedger(entryOp->liveEntry()));
}
else
#endif
{
if (isSorobanDataEntry(entryOp->liveEntry().data))
{
if (auto extIter =
lifetimeExtensions.find(*currKeyIt);
extIter != lifetimeExtensions.end())
{
setExpirationLedger(entryOp->liveEntry(),
extIter->second);
lifetimeExtensions.erase(extIter);
}
else
{
// If we haven't found a LIFETIME_EXTENSION
// entry yet, ext key is still in keys to
// search. Remove it to avoid redundant reads
// since we already found a newer DATA_ENTRY
auto extK = *currKeyIt;
setLeType(extK, ContractLedgerEntryType::
LIFETIME_EXTENSION);
keys.erase(extK);
}
}

result.push_back(entryOp->liveEntry());
}
}

currKeyIt = keys.erase(currKeyIt);
Expand Down Expand Up @@ -658,6 +700,38 @@ calculateMergeProtocolVersion(
}
}

// Lifetime extensions have a different LedgerKey than the entry they bump,
// but "refer" to the bumped entry. Returns true if inputs have the same key or
// if one input is a lifetime extension for the other entry
template <class T>
static bool
refersToSameEntry(T const& a, T const& b)
{
    // Exact equality trivially refers to the same entry.
    if (a == b)
    {
        return true;
    }

#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
    // Otherwise compare keys while ignoring the ContractLedgerEntryType
    // discriminant, so a LIFETIME_EXTENSION matches its DATA_ENTRY.
    if (a.type() != b.type())
    {
        return false;
    }
    switch (a.type())
    {
    case CONTRACT_DATA:
        // Same contract instance and same key -> same logical entry.
        return a.contractData().contractID == b.contractData().contractID &&
               a.contractData().key == b.contractData().key;
    case CONTRACT_CODE:
        // Code entries are identified by their hash alone.
        return a.contractCode().hash == b.contractCode().hash;
    default:
        break;
    }
#endif

    return false;
}

// There are 4 "easy" cases for merging: exhausted iterators on either
// side, or entries that compare non-equal. In all these cases we just
// take the lesser (or existing) entry and advance only one iterator,
Expand All @@ -669,7 +743,27 @@ mergeCasesWithDefaultAcceptance(
std::vector<BucketInputIterator>& shadowIterators, uint32_t protocolVersion,
bool keepShadowedLifecycleEntries)
{
if (!ni || (oi && ni && cmp(*oi, *ni)))

auto key = [](auto const& be) {
LedgerKey k;
switch (be.type())
{
case LIVEENTRY:
case INITENTRY:
k = LedgerEntryKey(be.liveEntry());
break;
case DEADENTRY:
k = be.deadEntry();
break;
case METAENTRY:
throw std::runtime_error("Malformed bucket: Unexpected metaentry.");
}

return k;
};

if (!ni ||
(oi && ni && !refersToSameEntry(key(*oi), key(*ni)) && cmp(*oi, *ni)))
{
// Either of:
//
Expand All @@ -684,7 +778,8 @@ mergeCasesWithDefaultAcceptance(
++oi;
return true;
}
else if (!oi || (oi && ni && cmp(*ni, *oi)))
else if (!oi || (oi && ni && !refersToSameEntry(key(*oi), key(*ni)) &&
cmp(*ni, *oi)))
{
// Either of:
//
Expand Down Expand Up @@ -773,6 +868,26 @@ mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi,
// because even if there is a subsequent (newer) INIT entry, the
// invariant is maintained for that newer entry too (it is still
// preceded by a DEAD state).
//
// For Soroban types, we must also consider which entries are
// LIFETIME_EXTENSION entries and DATA_ENTRIES. While LIFETIME_EXTENSION
// and DATA_ENTRIES have different keys, newer LIFETIME_EXTENSION entries
// merge into older DATA_ENTRY entries as follows:
//
// old | new | result
// ---------------+----------------+-------------------------------
// INIT | INIT | error
// LIVE | INIT | error
// DEAD | INIT=x | LIVE=x
// INIT=x | LIVE - DATA=y | INIT=y
// INIT=x | LIVE - EXT=y | INIT with lifetime=y, data=x
// LIVE - EXT=x | LIVE - EXT=y | LIVE=y
// LIVE - EXT=x | LIVE - DATA=y | LIVE=y
// LIVE - DATA=x | LIVE - EXT=y | LIVE with lifetime=y, data=x
// INIT | DEAD | empty
//
// Note that LIFETIME_EXTENSION entries may not be INIT entries but must be
// LIVEENTRIES

BucketEntry const& oldEntry = *oi;
BucketEntry const& newEntry = *ni;
Expand All @@ -781,6 +896,27 @@ mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi,
countOldEntryType(mc, oldEntry);
countNewEntryType(mc, newEntry);

auto replaceLifetime = [](LedgerEntry& outEntry,
LedgerEntry const& lifetimeEntry) {
#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
releaseAssert(refersToSameEntry(outEntry.data, lifetimeEntry.data));
if (auto t = outEntry.data.type(); t == CONTRACT_CODE)
{
outEntry.data.contractCode().expirationLedgerSeq =
lifetimeEntry.data.contractCode().expirationLedgerSeq;
}
else if (t == CONTRACT_DATA)
{
outEntry.data.contractData().expirationLedgerSeq =
lifetimeEntry.data.contractData().expirationLedgerSeq;
}
else
{
releaseAssert(false);
}
#endif
};

if (newEntry.type() == INITENTRY)
{
// The only legal new-is-INIT case is merging a delete+create to an
Expand All @@ -805,7 +941,19 @@ mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi,
// Merge a create+update to a fresher create.
BucketEntry newInit;
newInit.type(INITENTRY);
newInit.liveEntry() = newEntry.liveEntry();

if (isSorobanExtEntry(newEntry.liveEntry().data))
{
// New entry is lifetime extension, keep oldEntry data with
// newEntry lifetime
newInit.liveEntry() = oldEntry.liveEntry();
replaceLifetime(newInit.liveEntry(), newEntry.liveEntry());
}
else
{
newInit.liveEntry() = newEntry.liveEntry();
}

++mc.mOldInitEntriesMergedWithNewLive;
maybePut(out, newInit, shadowIterators,
keepShadowedLifecycleEntries, mc);
Expand All @@ -823,10 +971,30 @@ mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi,
}
else
{
// Neither is in INIT state, take the newer one.
// Neither is in INIT state

// TODO: Update merge counter with Soroban metrics
++mc.mNewEntriesMergedWithOldNeitherInit;
maybePut(out, newEntry, shadowIterators, keepShadowedLifecycleEntries,
mc);

// If new entry is lifetime extension and old
// entry is not, put oldEntry data with newEntry lifetime
if (newEntry.type() == LIVEENTRY && oldEntry.type() == LIVEENTRY &&
isSorobanExtEntry(newEntry.liveEntry().data) &&
!isSorobanExtEntry(oldEntry.liveEntry().data))
{
BucketEntry newResult;
newResult.type(LIVEENTRY);
newResult.liveEntry() = oldEntry.liveEntry();
replaceLifetime(newResult.liveEntry(), newEntry.liveEntry());
maybePut(out, newResult, shadowIterators,
keepShadowedLifecycleEntries, mc);
}
// Just take newer one
else
{
maybePut(out, newEntry, shadowIterators,
keepShadowedLifecycleEntries, mc);
}
}
++oi;
++ni;
Expand Down
Loading