fix nodes config update after shuffling and add chain simulator scenario
AdoAdoAdo committed May 15, 2024
1 parent 1264241 commit f79acdf
Showing 5 changed files with 140 additions and 27 deletions.
113 changes: 111 additions & 2 deletions integrationTests/chainSimulator/staking/stake/stakeAndUnStake_test.go
@@ -12,6 +12,9 @@ import (
coreAPI "github.com/multiversx/mx-chain-core-go/data/api"
"github.com/multiversx/mx-chain-core-go/data/transaction"
"github.com/multiversx/mx-chain-core-go/data/validator"
logger "github.com/multiversx/mx-chain-logger-go"
"github.com/stretchr/testify/require"

"github.com/multiversx/mx-chain-go/common"
"github.com/multiversx/mx-chain-go/config"
chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator"
@@ -23,8 +26,6 @@ import (
chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process"
"github.com/multiversx/mx-chain-go/process"
"github.com/multiversx/mx-chain-go/vm"
logger "github.com/multiversx/mx-chain-logger-go"
"github.com/stretchr/testify/require"
)

const (
@@ -2383,6 +2384,114 @@ func TestChainSimulator_UnStakeOneActiveNodeAndCheckAPIAuctionList(t *testing.T)
require.Equal(t, 1, numUnQualified)
}

// Nodes configuration at genesis consists of a total of 24 nodes, distributed on 3 shards + meta:
// - 4 eligible nodes/shard
// - 2 waiting nodes/shard
// - 2 nodes to shuffle per shard
// - max num nodes config for stakingV4 step3 = 32 (downsized from the previous 40)
// With this config, at most 8 nodes (4 shards x 2 shuffled out) should be selected from the auction
// list each epoch. This test runs with fewer nodes than the configured maximum and checks that no
// nodes end up in the auction list, because the lowWaitingList condition is triggered when in staking v4
func TestChainSimulator_EdgeCaseLowWaitingList(t *testing.T) {
if testing.Short() {
t.Skip("this is not a short test")
}

startTime := time.Now().Unix()
roundDurationInMillis := uint64(6000)
roundsPerEpoch := core.OptionalUint64{
HasValue: true,
Value: 20,
}

stakingV4Step1Epoch := uint32(2)
stakingV4Step2Epoch := uint32(3)
stakingV4Step3Epoch := uint32(4)

numOfShards := uint32(3)
cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{
BypassTxSignatureCheck: false,
TempDir: t.TempDir(),
PathToInitialConfig: defaultPathToInitialConfig,
NumOfShards: numOfShards,
GenesisTimestamp: startTime,
RoundDurationInMillis: roundDurationInMillis,
RoundsPerEpoch: roundsPerEpoch,
ApiInterface: api.NewNoApiInterface(),
MinNodesPerShard: 4,
MetaChainMinNodes: 4,
NumNodesWaitingListMeta: 2,
NumNodesWaitingListShard: 2,
AlterConfigsFunction: func(cfg *config.Configs) {
cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = stakingV4Step1Epoch
cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = stakingV4Step2Epoch
cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = stakingV4Step3Epoch

cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].MaxNumNodes = 40
cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2

cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = stakingV4Step3Epoch
cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 32
cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 2
},
})
require.Nil(t, err)
require.NotNil(t, cs)

defer cs.Close()

epochToCheck := int32(stakingV4Step3Epoch + 1)
err = cs.GenerateBlocksUntilEpochIsReached(epochToCheck)
require.Nil(t, err)

metachainNode := cs.GetNodeHandler(core.MetachainShardId)
numQualified, numUnQualified := getNumQualifiedAndUnqualified(t, metachainNode)
require.Equal(t, 0, numQualified)
require.Equal(t, 0, numUnQualified)

// we always have 0 in auction list because of the lowWaitingList condition
epochToCheck += 1
err = cs.GenerateBlocksUntilEpochIsReached(epochToCheck)
require.Nil(t, err)

numQualified, numUnQualified = getNumQualifiedAndUnqualified(t, metachainNode)
require.Equal(t, 0, numQualified)
require.Equal(t, 0, numUnQualified)

// stake 16 more nodes; these will go to the auction list
stakeNodes(t, cs, 16)

epochToCheck += 1
err = cs.GenerateBlocksUntilEpochIsReached(epochToCheck)
require.Nil(t, err)

numQualified, numUnQualified = getNumQualifiedAndUnqualified(t, metachainNode)
// all the previously registered nodes will be selected: we have 24 nodes in eligible+waiting and 8 will
// shuffle out, but this time lowWaitingList will not be triggered, as there are enough nodes in auction,
// so we end up with 24-8 = 16 nodes remaining + 16 from auction, filling up all 32 positions
require.Equal(t, 16, numQualified)
require.Equal(t, 0, numUnQualified)
}
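
The selection arithmetic spelled out in the comments above can be checked with a small standalone sketch. The constants mirror the test configuration; the names are illustrative only and none of this code is part of the commit:

package main

import "fmt"

func main() {
	const (
		numShardsPlusMeta      = 4  // 3 shards + metachain
		nodesToShufflePerShard = 2  // MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard
		maxNumNodes            = 32 // MaxNodesChangeEnableEpoch[2].MaxNumNodes
		eligiblePlusWaiting    = 24 // 16 eligible + 8 waiting at genesis
	)

	// each epoch, every shard (and meta) shuffles out nodesToShufflePerShard nodes
	shuffledOut := numShardsPlusMeta * nodesToShufflePerShard // 8

	// nodes that remain in eligible+waiting after shuffling
	remaining := eligiblePlusWaiting - shuffledOut // 24 - 8 = 16

	// auction slots to fill in order to reach the configured maximum
	fromAuction := maxNumNodes - remaining // 32 - 16 = 16

	fmt.Println("shuffled out per epoch:", shuffledOut)
	fmt.Println("selected from auction:", fromAuction) // matches numQualified == 16 in the test
}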

func stakeNodes(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, numTxs int) {
txs := make([]*transaction.Transaction, numTxs)
for i := 0; i < numTxs; i++ {
privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1)
require.Nil(t, err)
err = cs.AddValidatorKeys(privateKey)
require.Nil(t, err)

mintValue := big.NewInt(0).Add(staking.MinimumStakeValue, staking.OneEGLD)
validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue)
require.Nil(t, err)

txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature)
txs[i] = staking.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, staking.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation)
}
stakeTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted(txs, staking.MaxNumOfBlockToGenerateWhenExecutingTx)
require.Nil(t, err)
require.NotNil(t, stakeTxs)
require.Len(t, stakeTxs, numTxs)

require.Nil(t, cs.GenerateBlocks(1))
}

func stakeOneNode(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator) {
privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1)
require.Nil(t, err)
6 changes: 4 additions & 2 deletions sharding/nodesCoordinator/indexHashedNodesCoordinator.go
@@ -159,7 +159,7 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed
ihnc.loadingFromDisk.Store(false)

ihnc.nodesCoordinatorHelper = ihnc
err = ihnc.setNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, nil, nil, arguments.Epoch)
err = ihnc.setNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, nil, nil, arguments.Epoch, false)
if err != nil {
return nil, err
}
@@ -260,6 +260,7 @@ func (ihnc *indexHashedNodesCoordinator) setNodesPerShards(
leaving map[uint32][]Validator,
shuffledOut map[uint32][]Validator,
epoch uint32,
lowWaitingList bool,
) error {
ihnc.mutNodesConfig.Lock()
defer ihnc.mutNodesConfig.Unlock()
@@ -299,6 +300,7 @@ func (ihnc *indexHashedNodesCoordinator) setNodesPerShards(
nodesConfig.waitingMap = waiting
nodesConfig.leavingMap = leaving
nodesConfig.shuffledOutMap = shuffledOut
nodesConfig.lowWaitingList = lowWaitingList
nodesConfig.shardID, isCurrentNodeValidator = ihnc.computeShardForSelfPublicKey(nodesConfig)
nodesConfig.selectors, err = ihnc.createSelectors(nodesConfig)
if err != nil {
@@ -685,7 +687,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa
resUpdateNodes.Leaving,
)

err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, newEpoch)
err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, newEpoch, resUpdateNodes.LowWaitingList)
if err != nil {
log.Error("set nodes per shard failed", "error", err.Error())
}
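The essence of the fix is visible in this hunk: the low-waiting-list flag computed by the shuffler now survives into the per-epoch nodes configuration instead of being dropped. A minimal model of that data flow follows; field and parameter names match the diff, while everything else (the Validator stand-in, the method shape) is simplified and assumed for illustration:

// Validator is a stand-in for the package's Validator interface.
type Validator interface {
	PubKey() []byte
}

// epochNodesConfig is a simplified model; the real coordinator holds more
// state and guards it with mutNodesConfig.
type epochNodesConfig struct {
	eligibleMap    map[uint32][]Validator
	waitingMap     map[uint32][]Validator
	leavingMap     map[uint32][]Validator
	shuffledOutMap map[uint32][]Validator
	lowWaitingList bool // new: persisted per epoch after shuffling
}

// set mirrors what setNodesPerShards now does: the flag received from the
// shuffler (resUpdateNodes.LowWaitingList at the call sites) is stored with
// the rest of the epoch's node maps, so later reads reflect the edge case.
func (cfg *epochNodesConfig) set(
	eligible, waiting, leaving, shuffledOut map[uint32][]Validator,
	lowWaitingList bool,
) {
	cfg.eligibleMap = eligible
	cfg.waitingMap = waiting
	cfg.leavingMap = leaving
	cfg.shuffledOutMap = shuffledOut
	cfg.lowWaitingList = lowWaitingList
}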
@@ -41,7 +41,7 @@ func (ihnc *indexHashedNodesCoordinator) SetNodesConfigFromValidatorsInfo(epoch
resUpdateNodes.Leaving,
)

err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, epoch)
err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, epoch, resUpdateNodes.LowWaitingList)
if err != nil {
return err
}
@@ -13,15 +13,16 @@ import (
"github.com/multiversx/mx-chain-core-go/data/endProcess"
"github.com/multiversx/mx-chain-core-go/hashing/blake2b"
"github.com/multiversx/mx-chain-core-go/hashing/sha256"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/multiversx/mx-chain-go/common"
"github.com/multiversx/mx-chain-go/sharding/mock"
"github.com/multiversx/mx-chain-go/state"
"github.com/multiversx/mx-chain-go/testscommon/genericMocks"
"github.com/multiversx/mx-chain-go/testscommon/hashingMocks"
"github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock"
vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestNewIndexHashedNodesCoordinatorWithRater_NilRaterShouldErr(t *testing.T) {
@@ -48,14 +49,14 @@ func TestNewIndexHashedGroupSelectorWithRater_OkValsShouldWork(t *testing.T) {
assert.Nil(t, err)
}

//------- LoadEligibleList
// ------- LoadEligibleList

func TestIndexHashedGroupSelectorWithRater_SetNilEligibleMapShouldErr(t *testing.T) {
t.Parallel()
waiting := createDummyNodesMap(2, 1, "waiting")
nc, _ := NewIndexHashedNodesCoordinator(createArguments())
ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{})
assert.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waiting, nil, nil, 0))
assert.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waiting, nil, nil, 0, false))
}

func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) {
@@ -113,7 +114,7 @@ func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) {
assert.Equal(t, eligibleMap[0], readEligible)
}

//------- functionality tests
// ------- functionality tests

func TestIndexHashedGroupSelectorWithRater_ComputeValidatorsGroup1ValidatorShouldNotCallGetRating(t *testing.T) {
t.Parallel()
@@ -149,7 +150,7 @@ func BenchmarkIndexHashedGroupSelectorWithRater_ComputeValidatorsGroup63of400(b
consensusGroupSize := 63
list := make([]Validator, 0)

//generate 400 validators
// generate 400 validators
for i := 0; i < 400; i++ {
list = append(list, newValidatorMock([]byte("pk"+strconv.Itoa(i)), 1, defaultSelectionChances))
}
@@ -219,7 +220,7 @@ func Test_ComputeValidatorsGroup63of400(t *testing.T) {
shardSize := uint32(400)
list := make([]Validator, 0)

//generate 400 validators
// generate 400 validators
for i := uint32(0); i < shardSize; i++ {
list = append(list, newValidatorMock([]byte(fmt.Sprintf("pk%v", i)), 1, defaultSelectionChances))
}
@@ -785,7 +786,7 @@ func BenchmarkIndexHashedGroupSelectorWithRater_TestExpandList(b *testing.B) {
}
}

//a := []int{1, 2, 3, 4, 5, 6, 7, 8}
// a := []int{1, 2, 3, 4, 5, 6, 7, 8}
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
rnd.Shuffle(len(array), func(i, j int) { array[i], array[j] = array[j], array[i] })
m2 := runtime.MemStats{}
@@ -820,7 +821,7 @@ func BenchmarkIndexHashedWithRaterGroupSelector_ComputeValidatorsGroup21of400(b
consensusGroupSize := 21
list := make([]Validator, 0)

//generate 400 validators
// generate 400 validators
for i := 0; i < 400; i++ {
list = append(list, newValidatorMock([]byte("pk"+strconv.Itoa(i)), 1, defaultSelectionChances))
}
27 changes: 14 additions & 13 deletions sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go
@@ -20,6 +20,9 @@ import (
"github.com/multiversx/mx-chain-core-go/hashing"
"github.com/multiversx/mx-chain-core-go/hashing/sha256"
"github.com/multiversx/mx-chain-core-go/marshal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/multiversx/mx-chain-go/common"
"github.com/multiversx/mx-chain-go/dataRetriever/dataPool"
"github.com/multiversx/mx-chain-go/epochStart"
@@ -31,8 +34,6 @@ import (
"github.com/multiversx/mx-chain-go/testscommon/hashingMocks"
"github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock"
vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

const stakingV4Epoch = 444
@@ -145,7 +146,7 @@ func validatorsPubKeys(validators []Validator) []string {
return pKeys
}

//------- NewIndexHashedNodesCoordinator
// ------- NewIndexHashedNodesCoordinator

func TestNewIndexHashedNodesCoordinator_NilHasherShouldErr(t *testing.T) {
t.Parallel()
@@ -247,7 +248,7 @@ func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) {
require.Nil(t, err)
}

//------- LoadEligibleList
// ------- LoadEligibleList

func TestIndexHashedNodesCoordinator_SetNilEligibleMapShouldErr(t *testing.T) {
t.Parallel()
@@ -256,7 +257,7 @@ func TestIndexHashedNodesCoordinator_SetNilEligibleMapShouldErr(t *testing.T) {
arguments := createArguments()

ihnc, _ := NewIndexHashedNodesCoordinator(arguments)
require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waitingMap, nil, nil, 0))
require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waitingMap, nil, nil, 0, false))
}

func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) {
@@ -266,7 +267,7 @@ func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) {
arguments := createArguments()

ihnc, _ := NewIndexHashedNodesCoordinator(arguments)
require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(eligibleMap, nil, nil, nil, 0))
require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(eligibleMap, nil, nil, nil, 0, false))
}

func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) {
@@ -319,7 +320,7 @@ func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) {
require.Equal(t, eligibleMap[0], readEligible)
}

//------- ComputeValidatorsGroup
// ------- ComputeValidatorsGroup

func TestIndexHashedNodesCoordinator_NewCoordinatorGroup0SizeShouldErr(t *testing.T) {
t.Parallel()
@@ -401,7 +402,7 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroupInvalidShardIdShouldE
require.Nil(t, list2)
}

//------- functionality tests
// ------- functionality tests

func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) {
t.Parallel()
@@ -558,7 +559,7 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10BlocksMe

mut := sync.Mutex{}

//consensusGroup := list[0:21]
// consensusGroup := list[0:21]
cacheMap := make(map[string]interface{})
lruCache := &mock.NodesCoordinatorCacheMock{
PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) {
@@ -1275,7 +1276,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldTriggerWrongConfigur
},
}

err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2)
err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2, false)
require.NoError(t, err)

value := <-chanStopNode
@@ -1301,7 +1302,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldNotTriggerWrongConfi
},
}

err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2)
err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2, false)
require.NoError(t, err)

require.Empty(t, chanStopNode)
@@ -1333,7 +1334,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldSetNodeTypeValidator
},
}

err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2)
err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2, false)
require.NoError(t, err)
require.True(t, setTypeWasCalled)
require.Equal(t, core.NodeTypeValidator, nodeTypeResult)
@@ -1365,7 +1366,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldSetNodeTypeObserver(
},
}

err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2)
err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2, false)
require.NoError(t, err)
require.True(t, setTypeWasCalled)
require.Equal(t, core.NodeTypeObserver, nodeTypeResult)
