wr104
June 04, 2015, 07:40:29 PM
We can simply modify a few lines of code in bitcoin-qt to support a 20MB or even a 20GB block: if blocksize > 20MB, then blocksize = the first 20MB, and the rest of the transactions stand in line and wait for the next block.
Or it can be phased in, like:
if (blocknumber > 2000000) maxblocksize = 20MB, and the rest of the transactions stand in line and wait for the next block.
It can start being in versions way ahead, so by the time it reaches that block number and goes into effect, the older versions that don't have it are already obsolete.
When we're near the cutoff block number, I can put an alert to old versions to make sure they know they have to upgrade.
Why do we have to hard fork the blockchain? Why not just modify this code in bitcoin-qt, so that everyone running a new version of bitcoin-qt can recognize all 20MB of a larger block, while those running an old version only recognize the first 1MB of it? In my scenario there is always only one blockchain.

Unfortunately, it is not that simple, because you are changing network rules. If you are not careful, you risk creating consensus forks, which are the worst nightmare for a coin. You would need a little more code, like the Git patch I wrote below for Bitcoin 0.9.5. Basically, you need a kill switch for older wallet versions (for example: begin rejecting older block versions once 95% of the nodes have upgraded), and then you allow the larger block size after a certain block height (for example 400,000, which is roughly nine months from today). The diffstat:

 src/core.h        |  2 +-
 src/init.cpp      |  4 ++++
 src/main.cpp      | 30 ++++++++++++++++++++++++------
 src/main.h        | 11 ++++++++---
 src/miner.cpp     | 10 ++++++----
 src/rpcmining.cpp | 10 ++++++++--
 6 files changed, 51 insertions(+), 16 deletions(-)
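To make that two-part mechanism concrete before the patch itself, here is a minimal standalone sketch of the two rules, using made-up names (GetMaxBlockSize, IsBlockVersionObsolete, FORK_HEIGHT) that do not exist in the Bitcoin source; the patch below wires the same idea into the existing ConnectBlock(), CheckBlock() and AcceptBlock() functions instead.

// Hedged, simplified model of the hard-fork gating described above.
// None of these names exist in the Bitcoin source; see the real patch below.
#include <cstdio>

static const unsigned int OLD_LIMIT   = 1000000;           // 1 MB, the current rule
static const unsigned int NEW_LIMIT   = 20 * 1024 * 1024;  // 20 MiB, the proposed rule
static const int          FORK_HEIGHT = 400000;            // example switch-over height

// Rule 1: the valid block size depends only on the block's height.
unsigned int GetMaxBlockSize(int nHeight)
{
    return (nHeight >= FORK_HEIGHT) ? NEW_LIMIT : OLD_LIMIT;
}

// Rule 2: the "kill switch" -- once 95% of the last 1000 blocks signal the
// new block version, blocks still carrying the old version are rejected.
bool IsBlockVersionObsolete(int nBlockVersion, unsigned int nUpgradedOfLast1000)
{
    return (nBlockVersion < 4) && (nUpgradedOfLast1000 >= 950);
}

int main()
{
    std::printf("max size at height 399999: %u bytes\n", GetMaxBlockSize(399999)); // 1000000
    std::printf("max size at height 400000: %u bytes\n", GetMaxBlockSize(400000)); // 20971520
    std::printf("v3 block rejected at 96%% upgraded: %d\n",
                IsBlockVersionObsolete(3, 960));                                   // 1
    return 0;
}

The kill switch matters because it pushes the last miners to upgrade well before the height-based switch to the larger limit, following the same pattern as the existing nVersion=2 and nVersion=3 supermajority rules. The full patch follows.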
diff --git a/src/core.h b/src/core.h
index d89f06b..01af749 100644
--- a/src/core.h
+++ b/src/core.h
@@ -345,7 +345,7 @@ class CBlockHeader
 {
 public:
     // header
-    static const int CURRENT_VERSION=3;
+    static const int CURRENT_VERSION=4;
     int nVersion;
     uint256 hashPrevBlock;
     uint256 hashMerkleRoot;
diff --git a/src/init.cpp b/src/init.cpp
index 6f9abca..d3f40be 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1103,6 +1103,10 @@ bool AppInit2(boost::thread_group& threadGroup)
 
     RandAddSeedPerfmon();
 
+    // Check if the network can begin accepting larger block size
+    if (chainActive.Height() >= 400000)
+        fNewBlockSizeLimit = true;
+
     //// debug print
     LogPrintf("mapBlockIndex.size() = %u\n", mapBlockIndex.size());
     LogPrintf("nBestHeight = %d\n", chainActive.Height());
diff --git a/src/main.cpp b/src/main.cpp
index a42bb8a..caa2352 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -47,6 +47,7 @@ bool fImporting = false;
 bool fReindex = false;
 bool fBenchmark = false;
 bool fTxIndex = false;
+bool fNewBlockSizeLimit = false;
 unsigned int nCoinCacheSize = 5000;
 
 /** Fees smaller than this (in satoshi) are considered zero fee (for transaction creation) */
@@ -1809,6 +1810,7 @@ bool ConnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, C
     int64_t nFees = 0;
     int nInputs = 0;
     unsigned int nSigOps = 0;
+    const int nSigOpsLimit = fNewBlockSizeLimit ? MAX_BLOCK_SIGOPS : OLD_MAX_BLOCK_SIGOPS;
     CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
     std::vector<std::pair<uint256, CDiskTxPos> > vPos;
     vPos.reserve(block.vtx.size());
@@ -1818,7 +1820,7 @@ bool ConnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, C
 
         nInputs += tx.vin.size();
         nSigOps += GetLegacySigOpCount(tx);
-        if (nSigOps > MAX_BLOCK_SIGOPS)
+        if (nSigOps > nSigOpsLimit)
             return state.DoS(100, error("ConnectBlock() : too many sigops"),
                              REJECT_INVALID, "bad-blk-sigops");
 
@@ -1834,7 +1836,7 @@ bool ConnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, C
             // this is to prevent a "rogue miner" from creating
             // an incredibly-expensive-to-validate block.
             nSigOps += GetP2SHSigOpCount(tx, view);
-            if (nSigOps > MAX_BLOCK_SIGOPS)
+            if (nSigOps > nSigOpsLimit)
                 return state.DoS(100, error("ConnectBlock() : too many sigops"),
                                  REJECT_INVALID, "bad-blk-sigops");
         }
@@ -1966,6 +1968,10 @@ void static UpdateTip(CBlockIndex *pindexNew) {
             // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
             strMiscWarning = _("Warning: This version is obsolete, upgrade required!");
     }
+    // Check if the network is ready to accept larger block size
+    if (!fNewBlockSizeLimit && chainActive.Height() >= 400000) {
+        fNewBlockSizeLimit = true;
+    }
 }
 
 // Disconnect chainActive's tip.
@@ -2319,7 +2325,8 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo
     // that can be verified before saving an orphan block.
 
     // Size limits
-    if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
+    const int nBlkSizeLimit = fNewBlockSizeLimit ? MAX_BLOCK_SIZE : OLD_MAX_BLOCK_SIZE;
+    if (block.vtx.empty() || block.vtx.size() > (nBlkSizeLimit / 60) || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > nBlkSizeLimit)
         return state.DoS(100, error("CheckBlock() : size limits failed"),
                          REJECT_INVALID, "bad-blk-length");
 
@@ -2367,7 +2374,8 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo
     {
         nSigOps += GetLegacySigOpCount(tx);
     }
-    if (nSigOps > MAX_BLOCK_SIGOPS)
+    const int nSigOpsLimit = fNewBlockSizeLimit ? MAX_BLOCK_SIGOPS : OLD_MAX_BLOCK_SIGOPS;
+    if (nSigOps > nSigOpsLimit)
         return state.DoS(100, error("CheckBlock() : out-of-bounds SigOpCount"),
                          REJECT_INVALID, "bad-blk-sigops", true);
 
@@ -2426,7 +2434,7 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CDiskBlockPos* dbp)
     // Reject block.nVersion=1 blocks when 95% (75% on testnet) of the network has upgraded:
     if (block.nVersion < 2)
     {
-        if ((!TestNet() && CBlockIndex::IsSuperMajority(2, pindexPrev, 950, 1000)) ||
+        if (fNewBlockSizeLimit || (!TestNet() && CBlockIndex::IsSuperMajority(2, pindexPrev, 950, 1000)) ||
            (TestNet() && CBlockIndex::IsSuperMajority(2, pindexPrev, 75, 100)))
        {
            return state.Invalid(error("AcceptBlock() : rejected nVersion=1 block"),
@@ -2436,13 +2444,23 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CDiskBlockPos* dbp)
     // Reject block.nVersion=2 blocks when 95% (75% on testnet) of the network has upgraded:
     if (block.nVersion < 3)
     {
-        if ((!TestNet() && CBlockIndex::IsSuperMajority(3, pindexPrev, 950, 1000)) ||
+        if (fNewBlockSizeLimit || (!TestNet() && CBlockIndex::IsSuperMajority(3, pindexPrev, 950, 1000)) ||
            (TestNet() && CBlockIndex::IsSuperMajority(3, pindexPrev, 75, 100)))
        {
            return state.Invalid(error("AcceptBlock() : rejected nVersion=2 block"),
                                 REJECT_OBSOLETE, "bad-version");
        }
     }
+    // Reject block.nVersion=3 blocks when 95% (75% on testnet) of the network has upgraded:
+    if (block.nVersion < 4)
+    {
+        if (fNewBlockSizeLimit || (!TestNet() && CBlockIndex::IsSuperMajority(4, pindexPrev, 950, 1000)) ||
+           (TestNet() && CBlockIndex::IsSuperMajority(4, pindexPrev, 75, 100)))
+       {
+           return state.Invalid(error("AcceptBlock() : rejected nVersion=3 block"),
+                                REJECT_OBSOLETE, "bad-version");
+       }
+    }
     // Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
     if (block.nVersion >= 2)
     {
diff --git a/src/main.h b/src/main.h
index dc50dff..0b50ad4 100644
--- a/src/main.h
+++ b/src/main.h
@@ -33,8 +33,10 @@ class CBlockIndex;
 class CBloomFilter;
 class CInv;
 
-/** The maximum allowed size for a serialized block, in bytes (network rule) */
-static const unsigned int MAX_BLOCK_SIZE = 1000000;
+/** The NEW maximum allowed size for a serialized block, in bytes (network rule) */
+static const unsigned int MAX_BLOCK_SIZE = 20 * 1024 * 1024;
+/** The OLD maximum allowed size for a serialized block, in bytes (network rule) */
+static const unsigned int OLD_MAX_BLOCK_SIZE = 1000000;
 /** Default for -blockmaxsize and -blockminsize, which control the range of sizes the mining code will create **/
 static const unsigned int DEFAULT_BLOCK_MAX_SIZE = 750000;
 static const unsigned int DEFAULT_BLOCK_MIN_SIZE = 0;
@@ -42,8 +44,10 @@ static const unsigned int DEFAULT_BLOCK_MIN_SIZE = 0;
 static const unsigned int DEFAULT_BLOCK_PRIORITY_SIZE = 50000;
 /** The maximum size for transactions we're willing to relay/mine */
 static const unsigned int MAX_STANDARD_TX_SIZE = 100000;
-/** The maximum allowed number of signature check operations in a block (network rule) */
+/** The NEW maximum allowed number of signature check operations in a block (network rule) */
 static const unsigned int MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50;
+/** The OLD maximum allowed number of signature check operations in a block (network rule) */
+static const unsigned int OLD_MAX_BLOCK_SIGOPS = OLD_MAX_BLOCK_SIZE / 50;
 /** Default for -maxorphantx, maximum number of orphan transactions kept in memory */
 static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100;
 /** Default for -maxorphanblocks, maximum number of orphan blocks kept in memory */
@@ -95,6 +99,7 @@ extern int64_t nTimeBestReceived;
 extern bool fImporting;
 extern bool fReindex;
 extern bool fBenchmark;
+extern bool fNewBlockSizeLimit;
 extern int nScriptCheckThreads;
 extern bool fTxIndex;
 extern unsigned int nCoinCacheSize;
diff --git a/src/miner.cpp b/src/miner.cpp
index e8abb8c..d587fde 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -126,8 +126,9 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn)
 
     // Largest block you're willing to create:
     unsigned int nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE);
-    // Limit to betweeen 1K and MAX_BLOCK_SIZE-1K for sanity:
-    nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MAX_BLOCK_SIZE-1000), nBlockMaxSize));
+    // Limit to betweeen 1K and MAX_BLOCK_SIZE-1K for sanity. After height 400,000 we allow miners to create larger blocks.
+    const int nBlkSizeLimit = (!TestNet() && (chainActive.Tip()->nHeight + 1) > 400000) ? MAX_BLOCK_SIZE : OLD_MAX_BLOCK_SIZE;
+    nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(nBlkSizeLimit - 1000), nBlockMaxSize));
 
     // How much of the block should be dedicated to high-priority transactions,
     // included regardless of the fees they pay
@@ -228,6 +229,7 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn)
     uint64_t nBlockSize = 1000;
     uint64_t nBlockTx = 0;
     int nBlockSigOps = 100;
+    const int nSigOpsLimit = fNewBlockSizeLimit ? MAX_BLOCK_SIGOPS : OLD_MAX_BLOCK_SIGOPS;
     bool fSortedByFee = (nBlockPrioritySize <= 0);
 
     TxPriorityCompare comparer(fSortedByFee);
@@ -250,7 +252,7 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn)
 
         // Legacy limits on sigOps:
         unsigned int nTxSigOps = GetLegacySigOpCount(tx);
-        if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS)
+        if (nBlockSigOps + nTxSigOps >= nSigOpsLimit)
             continue;
 
         // Skip free transactions if we're past the minimum block size:
@@ -273,7 +275,7 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn)
         int64_t nTxFees = view.GetValueIn(tx)-tx.GetValueOut();
 
         nTxSigOps += GetP2SHSigOpCount(tx, view);
-        if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS)
+        if (nBlockSigOps + nTxSigOps >= nSigOpsLimit)
             continue;
 
         CValidationState state;
diff --git a/src/rpcmining.cpp b/src/rpcmining.cpp
index ef99cb3..8597238 100644
--- a/src/rpcmining.cpp
+++ b/src/rpcmining.cpp
@@ -579,8 +579,14 @@ Value getblocktemplate(const Array& params, bool fHelp)
     result.push_back(Pair("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1));
     result.push_back(Pair("mutable", aMutable));
     result.push_back(Pair("noncerange", "00000000ffffffff"));
-    result.push_back(Pair("sigoplimit", (int64_t)MAX_BLOCK_SIGOPS));
-    result.push_back(Pair("sizelimit", (int64_t)MAX_BLOCK_SIZE));
+    if (fNewBlockSizeLimit) {
+        result.push_back(Pair("sigoplimit", (int64_t)MAX_BLOCK_SIGOPS));
+        result.push_back(Pair("sizelimit", (int64_t)MAX_BLOCK_SIZE));
+    }
+    else {
+        result.push_back(Pair("sigoplimit", (int64_t)OLD_MAX_BLOCK_SIGOPS));
+        result.push_back(Pair("sizelimit", (int64_t)OLD_MAX_BLOCK_SIZE));
+    }
     result.push_back(Pair("curtime", (int64_t)pblock->nTime));
     result.push_back(Pair("bits", HexBits(pblock->nBits)));
     result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight+1)));
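The 95% (and 75% testnet) thresholds above come from CBlockIndex::IsSuperMajority(), which counts how many of the most recent blocks already advertise at least a given version number. Here is a hedged, standalone model of that behaviour, using a made-up BlockHeaderLite type in place of CBlockIndex; consult src/main.cpp in your tree for the real implementation.

// Simplified stand-in for the supermajority check, not the actual main.cpp code.
// It scans back over up to nToCheck recent headers and succeeds once nRequired
// of them carry at least minVersion.
#include <cstdio>

struct BlockHeaderLite {              // hypothetical stand-in for CBlockIndex
    int nVersion;
    const BlockHeaderLite* pprev;     // previous block in the chain
};

bool IsSuperMajorityModel(int minVersion, const BlockHeaderLite* pstart,
                          unsigned int nRequired, unsigned int nToCheck)
{
    unsigned int nFound = 0;
    for (unsigned int i = 0; i < nToCheck && nFound < nRequired && pstart != nullptr; i++) {
        if (pstart->nVersion >= minVersion)
            ++nFound;
        pstart = pstart->pprev;
    }
    return nFound >= nRequired;
}

int main()
{
    // Build a toy chain of 1000 headers in which 960 signal version 4.
    static BlockHeaderLite chain[1000];
    for (int i = 0; i < 1000; i++) {
        chain[i].nVersion = (i % 25 == 0) ? 3 : 4;       // 40 laggards, 96% upgraded
        chain[i].pprev = (i > 0) ? &chain[i - 1] : nullptr;
    }
    // 960 of the last 1000 blocks carry nVersion >= 4, so the 950/1000
    // threshold used in the patch is met and old-version blocks get rejected.
    std::printf("supermajority reached: %s\n",
                IsSuperMajorityModel(4, &chain[999], 950, 1000) ? "yes" : "no");
    return 0;
}

For scale, the patched MAX_BLOCK_SIZE of 20 * 1024 * 1024 = 20,971,520 bytes makes MAX_BLOCK_SIGOPS = 419,430 (versus 20,000 under the old 1,000,000-byte rule), and the new block.vtx.size() bound of nBlkSizeLimit / 60 allows roughly 349,525 transactions per block.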